text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Django settings for our little example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%n!!emy35vcr4)#w7#j2!20_soa^5&)3iqxig)w^f*zg!yn&yh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.staticfiles',
'offlinecdn',
)
ROOT_URLCONF = 'example.urls'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# configure offline CDN to work properly with staticfiles
OFFLINECDN_STATIC_ROOT = os.path.join(BASE_DIR, '.offlinecdn')
STATICFILES_DIRS = (
OFFLINECDN_STATIC_ROOT,
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
| {
"content_hash": "e2122bd99c0d0cd47aa2ffdbad444a01",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 23.815384615384616,
"alnum_prop": 0.708656330749354,
"repo_name": "gabegaster/django-offlinecdn",
"id": "0e468a17fa65aafe83e1e1082e155a323832d522",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4311"
},
{
"name": "Python",
"bytes": "14676"
}
],
"symlink_target": ""
} |
import argparse
import collections
import contextlib
import itertools
import multiprocessing.pool
import os.path
import sqlite3
from typing import Callable
from typing import Counter
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Pattern
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
import pkg_resources
from git_code_debt import options
from git_code_debt.discovery import get_metric_parsers_from_args
from git_code_debt.file_diff_stat import FileDiffStat
from git_code_debt.file_diff_stat import get_file_diff_stats_from_output
from git_code_debt.generate_config import GenerateOptions
from git_code_debt.logic import get_metric_has_data
from git_code_debt.logic import get_metric_mapping
from git_code_debt.logic import get_metric_values
from git_code_debt.logic import get_previous_sha
from git_code_debt.metric import Metric
from git_code_debt.metrics.base import DiffParserBase
from git_code_debt.metrics.base import MetricInfo
from git_code_debt.repo_parser import Commit
from git_code_debt.repo_parser import RepoParser
from git_code_debt.util import yaml
from git_code_debt.write_logic import insert_metric_changes
from git_code_debt.write_logic import insert_metric_values
def get_metrics(
        commit: Commit,
        diff: bytes,
        metric_parsers: Set[Type[DiffParserBase]],
        exclude: Pattern[bytes],
) -> Tuple[Metric, ...]:
    """Run every metric parser over one commit's diff.

    Files whose path matches ``exclude`` are dropped before any parser
    sees them.
    """
    file_diff_stats = tuple(
        stat
        for stat in get_file_diff_stats_from_output(diff)
        if not exclude.search(stat.path)
    )
    collected = []
    for parser_cls in metric_parsers:
        parser = parser_cls()
        collected.extend(parser.get_metrics_from_stat(commit, file_diff_stats))
    return tuple(collected)
def increment_metrics(
        metric_values: Counter[int],
        metric_mapping: Dict[str, int],
        metrics: Tuple[Metric, ...],
) -> None:
    """Fold each metric's value into the running totals, keyed by metric id."""
    deltas = {metric_mapping[metric.name]: metric.value for metric in metrics}
    metric_values.update(deltas)
def _get_metrics_inner(
        mp_args: Tuple[
            Optional[Commit],
            Commit,
            RepoParser,
            Set[Type[DiffParserBase]],
            Pattern[bytes],
        ],
) -> Tuple[Metric, ...]:
    """Worker entry point: unpack one task tuple and compute its metrics.

    A ``None`` compare commit means this is the repo's first commit, so the
    full original diff is used instead of a commit-to-commit diff.
    """
    compare_commit, commit, repo_parser, metric_parsers, exclude = mp_args
    if compare_commit is not None:
        diff = repo_parser.get_commit_diff(compare_commit.sha, commit.sha)
    else:
        diff = repo_parser.get_original_commit(commit.sha)
    return get_metrics(commit, diff, metric_parsers, exclude)
T = TypeVar('T')
T2 = TypeVar('T2')


@contextlib.contextmanager
def mapper(jobs: int) -> Generator[
        Callable[[Callable[[T2], T], Iterable[T2]], Iterable[T]], None, None,
]:
    """Yield a ``map``-like callable, optionally backed by a process pool.

    With ``jobs == 1`` the builtin ``map`` is yielded (no process overhead);
    otherwise a pool of ``jobs`` workers is created, its lazy ``imap`` is
    yielded, and the pool is closed when the context exits.
    """
    if jobs != 1:
        with contextlib.closing(multiprocessing.Pool(jobs)) as pool:
            yield pool.imap
    else:
        yield map
def update_has_data(
        db: sqlite3.Connection,
        metrics: Tuple[Metric, ...],
        metric_mapping: Dict[str, int],
        has_data: Dict[int, bool],
) -> None:
    """Mark metric ids that now carry a nonzero value.

    Updates both the in-memory ``has_data`` map and the ``metric_names``
    table; ids that are already flagged are skipped so each row is written
    at most once.
    """
    for metric in metrics:
        if not metric.value:
            continue
        metric_id = metric_mapping[metric.name]
        if not has_data[metric_id]:
            has_data[metric_id] = True
            db.execute(
                'UPDATE metric_names SET has_data=1 WHERE id = ?',
                (metric_id,),
            )
def load_data(
        database_file: str,
        repo: str,
        package_names: List[str],
        skip_defaults: bool,
        exclude: Pattern[bytes],
        jobs: int,
) -> None:
    """Compute metrics for all new commits in ``repo`` and store them.

    Resumes from the last recorded sha (if any), walks forward through
    history keeping a running Counter of metric values, and writes values
    and per-commit changes for each commit.  ``jobs`` controls how many
    worker processes compute diffs in parallel.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        has_data = get_metric_has_data(db)
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            # (a resumed run always includes the previous sha itself, so a
            # single returned commit means no new work).
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_id to a running value
            metric_values: Counter[int] = collections.Counter()
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                compare_commit = commits.pop(0)
                metric_values.update(get_metric_values(db, compare_commit.sha))
            # Pair each commit with its predecessor: element i of the first
            # zip argument is the commit *before* commits[i].
            mp_args = zip(
                [compare_commit, *commits],
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
                itertools.repeat(exclude),
            )
            # https://github.com/python/mypy/issues/11852
            with mapper(jobs) as do_map:  # type: ignore
                # Results arrive in order (imap preserves ordering), so the
                # running counter stays consistent with commit order.
                for commit, metrics in zip(
                        commits, do_map(_get_metrics_inner, mp_args),
                ):
                    update_has_data(db, metrics, metric_mapping, has_data)
                    increment_metrics(metric_values, metric_mapping, metrics)
                    insert_metric_values(db, metric_values, has_data, commit)
                    insert_metric_changes(db, metrics, metric_mapping, commit)
def create_schema(db: sqlite3.Connection) -> None:
    """Execute every packaged .sql script to create the database schema."""
    schema_dir = pkg_resources.resource_filename('git_code_debt', 'schema')
    for filename in os.listdir(schema_dir):
        script_path = os.path.join(schema_dir, filename)
        with open(script_path) as sql_file:
            db.executescript(sql_file.read())
def get_metrics_info(
        metric_parsers: Set[Type[DiffParserBase]],
) -> List[MetricInfo]:
    """Return the deduplicated, sorted metric infos from all parsers."""
    infos: Set[MetricInfo] = set()
    for parser_cls in metric_parsers:
        infos.update(parser_cls().get_metrics_info())
    return sorted(infos)
def insert_metrics_info(
        db: sqlite3.Connection,
        metrics_info: List[MetricInfo],
) -> None:
    """Insert one (name, description) row per metric into metric_names."""
    db.executemany(
        'INSERT INTO metric_names (name, description) VALUES (?, ?)',
        metrics_info,
    )
def populate_metric_ids(
        db: sqlite3.Connection,
        package_names: List[str],
        skip_defaults: bool,
) -> None:
    """Discover every metric and register its name in the database."""
    parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    insert_metrics_info(db, get_metrics_info(parsers))
def create_database(args: GenerateOptions) -> None:
    """Create a fresh database: schema first, then the metric name rows."""
    with sqlite3.connect(args.database) as db:
        create_schema(db)
        populate_metric_ids(
            db, args.metric_package_names, args.skip_default_metrics,
        )
def get_options_from_config(config_filename: str) -> GenerateOptions:
    """Load generation options from a yaml config file.

    Prints a message and exits with status 1 when the file is missing.
    """
    if not os.path.exists(config_filename):
        print(f'config file not found {config_filename}')
        # `exit` is a site.py convenience that is not guaranteed to exist
        # (e.g. under `python -S`); raise SystemExit explicitly instead.
        raise SystemExit(1)
    with open(config_filename) as config_file:
        return GenerateOptions.from_yaml(yaml.load(config_file))
def main(argv: Optional[Sequence[str]] = None) -> int:
    """CLI entry point: parse args, create the db if needed, load metrics."""
    parser = argparse.ArgumentParser()
    options.add_generate_config_filename(parser)
    parser.add_argument(
        '-j', '--jobs', type=int, default=multiprocessing.cpu_count(),
    )
    cli_args = parser.parse_args(argv)
    config = get_options_from_config(cli_args.config_filename)
    # First run: the database does not exist yet, so build schema + ids.
    if not os.path.exists(config.database):
        create_database(config)
    load_data(
        config.database,
        config.repo,
        config.metric_package_names,
        config.skip_default_metrics,
        config.exclude,
        cli_args.jobs,
    )
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
| {
"content_hash": "7a50ab08e48ba1d19137aa9234a9e9d2",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 31.848,
"alnum_prop": 0.6425521225822658,
"repo_name": "Yelp/git-code-debt",
"id": "b1dd3c5bc3d54610060549c446310c6f442284a2",
"size": "7962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git_code_debt/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "HTML",
"bytes": "1617"
},
{
"name": "JavaScript",
"bytes": "5161"
},
{
"name": "Makefile",
"bytes": "373"
},
{
"name": "Mako",
"bytes": "5510"
},
{
"name": "Python",
"bytes": "135421"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
"""Support for departure information for Rhein-Main public transport."""
import asyncio
from datetime import timedelta
import logging
from RMVtransport import RMVtransport
from RMVtransport.rmvtransport import RMVtransportApiConnectionError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Configuration keys accepted by this platform (see PLATFORM_SCHEMA below).
CONF_NEXT_DEPARTURE = "next_departure"
CONF_STATION = "station"
CONF_DESTINATIONS = "destinations"
CONF_DIRECTION = "direction"
CONF_LINES = "lines"
CONF_PRODUCTS = "products"
CONF_TIME_OFFSET = "time_offset"
CONF_MAX_JOURNEYS = "max_journeys"
CONF_TIMEOUT = "timeout"
# Fallback sensor name; replaced by the station name after the first update.
DEFAULT_NAME = "RMV Journey"
# Transport products understood by the RMV API.
VALID_PRODUCTS = ["U-Bahn", "Tram", "Bus", "S", "RB", "RE", "EC", "IC", "ICE"]
# Frontend icon per product; the None entry is the "no data" placeholder.
ICONS = {
    "U-Bahn": "mdi:subway",
    "Tram": "mdi:tram",
    "Bus": "mdi:bus",
    "S": "mdi:train",
    "RB": "mdi:train",
    "RE": "mdi:train",
    "EC": "mdi:train",
    "IC": "mdi:train",
    "ICE": "mdi:train",
    "SEV": "mdi:checkbox-blank-circle-outline",
    None: "mdi:clock",
}
ATTRIBUTION = "Data provided by opendata.rmv.de"
# Minimum interval between API requests, enforced via @Throttle.
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # Each list entry configures one departure-board sensor.
        vol.Required(CONF_NEXT_DEPARTURE): [
            {
                vol.Required(CONF_STATION): cv.string,
                # Only journeys stopping at one of these stations are kept.
                vol.Optional(CONF_DESTINATIONS, default=[]): vol.All(
                    cv.ensure_list, [cv.string]
                ),
                vol.Optional(CONF_DIRECTION): cv.string,
                # Line numbers/names to include; empty means all lines.
                vol.Optional(CONF_LINES, default=[]): vol.All(
                    cv.ensure_list, [cv.positive_int, cv.string]
                ),
                vol.Optional(CONF_PRODUCTS, default=VALID_PRODUCTS): vol.All(
                    cv.ensure_list, [vol.In(VALID_PRODUCTS)]
                ),
                # Hide departures leaving sooner than this many minutes.
                vol.Optional(CONF_TIME_OFFSET, default=0): cv.positive_int,
                vol.Optional(CONF_MAX_JOURNEYS, default=5): cv.positive_int,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            }
        ],
        # API request timeout in seconds, shared by all sensors.
        vol.Optional(CONF_TIMEOUT, default=10): cv.positive_int,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the RMV departure sensor.

    Builds one sensor per configured ``next_departure`` entry, performs an
    initial data refresh, and raises PlatformNotReady if no sensor has data.
    """
    timeout = config.get(CONF_TIMEOUT)
    session = async_get_clientsession(hass)
    sensors = [
        RMVDepartureSensor(
            session,
            next_departure[CONF_STATION],
            next_departure.get(CONF_DESTINATIONS),
            next_departure.get(CONF_DIRECTION),
            next_departure.get(CONF_LINES),
            next_departure.get(CONF_PRODUCTS),
            next_departure.get(CONF_TIME_OFFSET),
            next_departure.get(CONF_MAX_JOURNEYS),
            next_departure.get(CONF_NAME),
            timeout,
        )
        for next_departure in config.get(CONF_NEXT_DEPARTURE)
    ]
    # Passing bare coroutines to asyncio.wait() is deprecated since Python
    # 3.8 (removed in 3.11); gather() awaits the initial updates directly.
    if sensors:
        await asyncio.gather(*(sensor.async_update() for sensor in sensors))
    if not any(sensor.data for sensor in sensors):
        raise PlatformNotReady
    async_add_entities(sensors)
class RMVDepartureSensor(Entity):
    """Sensor showing the next RMV departure from one station."""

    def __init__(
        self,
        session,
        station,
        destinations,
        direction,
        lines,
        products,
        time_offset,
        max_journeys,
        name,
        timeout,
    ):
        """Initialize the sensor."""
        self._station = station
        self._name = name
        self._state = None
        self._icon = ICONS[None]
        self.data = RMVDepartureData(
            session,
            station,
            destinations,
            direction,
            lines,
            products,
            time_offset,
            max_journeys,
            timeout,
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def available(self):
        """Return True if entity is available."""
        return self._state is not None

    @property
    def state(self):
        """Return the next departure time."""
        return self._state

    @property
    def state_attributes(self):
        """Return details of the upcoming departure, if any."""
        if not self.data.departures:
            return {}
        upcoming = self.data.departures[0]
        return {
            "next_departures": self.data.departures[1:],
            "direction": upcoming.get("direction"),
            "line": upcoming.get("line"),
            "minutes": upcoming.get("minutes"),
            "departure_time": upcoming.get("departure_time"),
            "product": upcoming.get("product"),
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return TIME_MINUTES

    async def async_update(self):
        """Refresh departure data and derive state and icon from it."""
        await self.data.async_update()
        if self._name == DEFAULT_NAME:
            self._name = self.data.station
        self._station = self.data.station
        departures = self.data.departures
        if departures:
            self._state = departures[0].get("minutes")
            self._icon = ICONS[departures[0].get("product")]
        else:
            self._state = None
            self._icon = ICONS[None]
class RMVDepartureData:
    """Pull data from the opendata.rmv.de web page."""
    def __init__(
        self,
        session,
        station_id,
        destinations,
        direction,
        lines,
        products,
        time_offset,
        max_journeys,
        timeout,
    ):
        """Initialize the sensor."""
        # Human-readable station name; populated after the first successful
        # update from the API response.
        self.station = None
        self._station_id = station_id
        self._destinations = destinations
        self._direction = direction
        self._lines = lines
        self._products = products
        self._time_offset = time_offset
        self._max_journeys = max_journeys
        self.rmv = RMVtransport(session, timeout)
        # List of departure dicts, filtered and ordered; read by the sensor.
        self.departures = []
        # Ensures the "destination not found" message is logged only once.
        self._error_notification = False
    @Throttle(SCAN_INTERVAL)
    async def async_update(self):
        """Update the connection data."""
        try:
            # Request more journeys (50) than we keep, so that the local
            # destination/line/time filters below still leave enough results.
            _data = await self.rmv.get_departures(
                self._station_id,
                products=self._products,
                direction_id=self._direction,
                max_journeys=50,
            )
        except RMVtransportApiConnectionError:
            # Best-effort: clear data and keep the sensor alive.
            self.departures = []
            _LOGGER.warning("Could not retrieve data from rmv.de")
            return
        self.station = _data.get("station")
        _deps = []
        # Track configured destinations never seen in any journey, for the
        # one-time informational log message below.
        _deps_not_found = set(self._destinations)
        for journey in _data["journeys"]:
            # find the first departure meeting the criteria
            _nextdep = {}
            if self._destinations:
                dest_found = False
                for dest in self._destinations:
                    if dest in journey["stops"]:
                        dest_found = True
                        if dest in _deps_not_found:
                            _deps_not_found.remove(dest)
                        # Record which configured destination matched.
                        _nextdep["destination"] = dest
                if not dest_found:
                    continue
            elif self._lines and journey["number"] not in self._lines:
                # Line filter only applies when no destinations are set.
                continue
            elif journey["minutes"] < self._time_offset:
                continue
            for attr in ["direction", "departure_time", "product", "minutes"]:
                _nextdep[attr] = journey.get(attr, "")
            _nextdep["line"] = journey.get("number", "")
            _deps.append(_nextdep)
            # NOTE(review): the `>` comparison breaks only after the list
            # already holds _max_journeys + 1 entries — looks like an
            # off-by-one; confirm intended count before changing.
            if len(_deps) > self._max_journeys:
                break
        if not self._error_notification and _deps_not_found:
            self._error_notification = True
            _LOGGER.info("Destination(s) %s not found", ", ".join(_deps_not_found))
        self.departures = _deps
| {
"content_hash": "dce65d1a19cfb240ceb89d20b220badb",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 86,
"avg_line_length": 30.185964912280703,
"alnum_prop": 0.5636405904916889,
"repo_name": "titilambert/home-assistant",
"id": "76e75d77a58f67a89067d5ff4149b3689068b60d",
"size": "8603",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rmvtransport/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
from __future__ import division
from datetime import timedelta
import textwrap
import warnings
import numpy as np
from pandas._libs import lib, tslibs
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas._libs.tslibs.fields import get_timedelta_field
from pandas._libs.tslibs.timedeltas import (
array_to_timedelta64, parse_timedelta_unit)
import pandas.compat as compat
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
_NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_dtype_equal,
is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_scalar,
is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCIndexClass, ABCSeries, ABCTimedeltaIndex)
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.algorithms import checked_add_with_arr
import pandas.core.common as com
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import Tick
from . import datetimelike as dtl
_BAD_DTYPE = "dtype {dtype} cannot be converted to timedelta64[ns]"
def _is_convertible_to_td(key):
    """Return True if ``key`` is a scalar the Timedelta constructor accepts."""
    convertible = (Tick, timedelta, np.timedelta64, compat.string_types)
    return isinstance(key, convertible)
def _field_accessor(name, alias, docstring=None):
def f(self):
values = self.asi8
result = get_timedelta_field(values, alias)
if self._hasnans:
result = self._maybe_mask_results(result, fill_value=None,
convert='float64')
return result
f.__name__ = name
f.__doc__ = "\n{}\n".format(docstring)
return property(f)
def _td_array_cmp(cls, op):
    """
    Wrap comparison operations to convert timedelta-like to timedelta64
    """
    opname = '__{name}__'.format(name=op.__name__)
    # NaT compares unequal to everything, so only __ne__ yields True at
    # NaT positions.
    nat_result = opname == '__ne__'
    def wrapper(self, other):
        if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
            # Defer so the higher-level wrapper's reflected op runs.
            return NotImplemented
        if _is_convertible_to_td(other) or other is NaT:
            # Scalar path: compare against a single Timedelta's i8 value.
            try:
                other = Timedelta(other)
            except ValueError:
                # failed to parse as timedelta
                return ops.invalid_comparison(self, other, op)
            result = op(self.view('i8'), other.value)
            if isna(other):
                result.fill(nat_result)
        elif not is_list_like(other):
            return ops.invalid_comparison(self, other, op)
        elif len(other) != len(self):
            raise ValueError("Lengths must match")
        else:
            # Array path: coerce to TimedeltaArray and compare i8 values.
            try:
                other = type(self)._from_sequence(other)._data
            except (ValueError, TypeError):
                return ops.invalid_comparison(self, other, op)
            result = op(self.view('i8'), other.view('i8'))
            result = com.values_from_object(result)
            o_mask = np.array(isna(other))
            if o_mask.any():
                result[o_mask] = nat_result
        # Positions where self is NaT always get the fixed NaT answer.
        if self._hasnans:
            result[self._isnan] = nat_result
        return result
    return compat.set_function_name(wrapper, opname, cls)
class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
"""
Pandas ExtensionArray for timedelta data.
.. versionadded:: 0.24.0
.. warning::
TimedeltaArray is currently experimental, and its API may change
without warning. In particular, :attr:`TimedeltaArray.dtype` is
expected to change to be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : array-like
The timedelta data.
dtype : numpy.dtype
Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted.
freq : Offset, optional
copy : bool, default False
Whether to copy the underlying array of data.
"""
_typ = "timedeltaarray"
_scalar_type = Timedelta
__array_priority__ = 1000
# define my properties & methods for delegation
_other_ops = []
_bool_ops = []
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["to_pytimedelta", "total_seconds",
"round", "floor", "ceil"]
# Needed so that NaT.__richcmp__(DateTimeArray) operates pointwise
ndim = 1
    @property
    def _box_func(self):
        # Scalar boxing: wrap a raw nanosecond value as a Timedelta.
        return lambda x: Timedelta(x, unit='ns')
    @property
    def dtype(self):
        """
        The dtype for the TimedeltaArray.
        Always ``numpy.dtype("timedelta64[ns]")``.
        .. warning::
            A future version of pandas will change dtype to be an instance
            of a :class:`pandas.api.extensions.ExtensionDtype` subclass,
            not a ``numpy.dtype``.
        Returns
        -------
        numpy.dtype
        """
        return _TD_DTYPE
# ----------------------------------------------------------------
# Constructors
_attributes = ["freq"]
    def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
        """Validate inputs and store the underlying timedelta64[ns] data.

        ``values`` may be a Series/Index (unwrapped), another
        TimedeltaArray, or a 1-dim ndarray of timedelta64[ns] or int64
        (interpreted as nanoseconds).
        """
        if isinstance(values, (ABCSeries, ABCIndexClass)):
            values = values._values
        inferred_freq = getattr(values, "_freq", None)
        if isinstance(values, type(self)):
            # Copy/reconcile freq from the source array before unwrapping.
            if freq is None:
                freq = values.freq
            elif freq and values.freq:
                freq = to_offset(freq)
                freq, _ = dtl.validate_inferred_freq(freq, values.freq, False)
            values = values._data
        if not isinstance(values, np.ndarray):
            msg = (
                "Unexpected type '{}'. 'values' must be a TimedeltaArray "
                "ndarray, or Series or Index containing one of those."
            )
            raise ValueError(msg.format(type(values).__name__))
        if values.ndim != 1:
            raise ValueError("Only 1-dimensional input arrays are supported.")
        if values.dtype == 'i8':
            # for compat with datetime/timedelta/period shared methods,
            # we can sometimes get here with int64 values.  These represent
            # nanosecond UTC (or tz-naive) unix timestamps
            values = values.view(_TD_DTYPE)
        _validate_td64_dtype(values.dtype)
        dtype = _validate_td64_dtype(dtype)
        if freq == "infer":
            msg = (
                "Frequency inference not allowed in TimedeltaArray.__init__. "
                "Use 'pd.array()' instead."
            )
            raise ValueError(msg)
        if copy:
            values = values.copy()
        if freq:
            freq = to_offset(freq)
        self._data = values
        self._dtype = dtype
        self._freq = freq
        # Only verify freq against the data when it wasn't taken from the
        # source array itself.
        if inferred_freq is None and freq is not None:
            type(self)._validate_frequency(self, freq)
    @classmethod
    def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE):
        # Fastpath constructor: trusted ndarray input, asserts instead of
        # full validation; bypasses __init__.
        assert dtype == _TD_DTYPE, dtype
        assert isinstance(values, np.ndarray), type(values)
        result = object.__new__(cls)
        result._data = values.view(_TD_DTYPE)
        result._freq = to_offset(freq)
        result._dtype = _TD_DTYPE
        return result
    @classmethod
    def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False,
                       freq=None, unit=None):
        """Construct from an arbitrary sequence, validating/inferring freq."""
        if dtype:
            _validate_td64_dtype(dtype)
        freq, freq_infer = dtl.maybe_infer_freq(freq)
        data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)
        freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq,
                                                      freq_infer)
        result = cls._simple_new(data, freq=freq)
        if inferred_freq is None and freq is not None:
            # this condition precludes `freq_infer`
            cls._validate_frequency(result, freq)
        elif freq_infer:
            # Set _freq directly to bypass duplicative _validate_frequency
            # check.
            result._freq = to_offset(result.inferred_freq)
        return result
@classmethod
def _generate_range(cls, start, end, periods, freq, closed=None):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError('Must provide freq argument if no data is '
'supplied')
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, '
'and freq, exactly three must be specified')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
left_closed, right_closed = dtl.validate_endpoints(closed)
if freq is not None:
index = _generate_regular_range(start, end, periods, freq)
else:
index = np.linspace(start.value, end.value, periods).astype('i8')
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return cls._simple_new(index, freq=freq)
# ----------------------------------------------------------------
# DatetimeLike Interface
    def _unbox_scalar(self, value):
        # Convert a Timedelta (or NaT) scalar to its raw int64 nanosecond
        # representation for storage/comparison.
        if not isinstance(value, self._scalar_type) and value is not NaT:
            raise ValueError("'value' should be a Timedelta.")
        self._check_compatible_with(value)
        return value.value
    def _scalar_from_string(self, value):
        # Parse a string like "2 days" into a Timedelta scalar.
        return Timedelta(value)
    def _check_compatible_with(self, other):
        # we don't have anything to validate.
        # (Datetime counterparts check tz here; timedeltas have no such state.)
        pass
    def _maybe_clear_freq(self):
        # Called after inplace mutations that may invalidate the freq.
        self._freq = None
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
    @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__)
    def _validate_fill_value(self, fill_value):
        # NA-likes become iNaT; timedelta-likes become raw i8 nanoseconds;
        # anything else is rejected.
        if isna(fill_value):
            fill_value = iNaT
        elif isinstance(fill_value, (timedelta, np.timedelta64, Tick)):
            fill_value = Timedelta(fill_value).value
        else:
            raise ValueError("'fill_value' should be a Timedelta. "
                             "Got '{got}'.".format(got=fill_value))
        return fill_value
    def astype(self, dtype, copy=True):
        """Cast to ``dtype``; timedelta64 targets handled here, rest deferred."""
        # We handle
        # --> timedelta64[ns]
        # --> timedelta64
        # DatetimeLikeArrayMixin super call handles other cases
        dtype = pandas_dtype(dtype)
        if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
            # by pandas convention, converting to non-nano timedelta64
            # returns an int64-dtyped array with ints representing multiples
            # of the desired timedelta unit.  This is essentially division
            if self._hasnans:
                # avoid double-copying
                result = self._data.astype(dtype, copy=False)
                values = self._maybe_mask_results(result,
                                                  fill_value=None,
                                                  convert='float64')
                return values
            result = self._data.astype(dtype, copy=copy)
            return result.astype('i8')
        elif is_timedelta64_ns_dtype(dtype):
            # Already the right dtype; optionally copy.
            if copy:
                return self.copy()
            return self
        return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)
# ----------------------------------------------------------------
# Rendering Methods
    def _formatter(self, boxed=False):
        # NOTE(review): ``boxed`` is accepted for interface compatibility but
        # not forwarded — box=True is always used here; confirm intended.
        from pandas.io.formats.format import _get_format_timedelta64
        return _get_format_timedelta64(self, box=True)
    def _format_native_types(self, na_rep='NaT', date_format=None):
        # ``date_format`` is ignored: timedeltas carry no date component.
        from pandas.io.formats.format import _get_format_timedelta64
        formatter = _get_format_timedelta64(self._data, na_rep)
        return np.array([formatter(x) for x in self._data])
# ----------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_td_array_cmp)
    def _add_offset(self, other):
        # Tick offsets are routed through _add_delta by the mixin; any other
        # DateOffset has no defined sum with a timedelta.
        assert not isinstance(other, Tick)
        raise TypeError("cannot add the type {typ} to a {cls}"
                        .format(typ=type(other).__name__,
                                cls=type(self).__name__))
    def _add_delta(self, delta):
        """
        Add a timedelta-like, Tick, or TimedeltaIndex-like object
        to self, yielding a new TimedeltaArray.
        Parameters
        ----------
        delta : {timedelta, np.timedelta64, Tick,
                 TimedeltaIndex, ndarray[timedelta64]}
        Returns
        -------
        result : TimedeltaArray
        """
        new_values = super(TimedeltaArray, self)._add_delta(delta)
        return type(self)._from_sequence(new_values, freq='infer')
    def _add_datetime_arraylike(self, other):
        """
        Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.
        """
        if isinstance(other, np.ndarray):
            # At this point we have already checked that dtype is datetime64
            from pandas.core.arrays import DatetimeArray
            other = DatetimeArray(other)
        # defer to implementation in DatetimeArray (addition commutes)
        return other + self
def _add_datetimelike_scalar(self, other):
# adding a timedeltaindex to a datetimelike
from pandas.core.arrays import DatetimeArray
assert other is not NaT
other = Timestamp(other)
if other is NaT:
# In this case we specifically interpret NaT as a datetime, not
# the timedelta interpretation we would get by returning self + NaT
result = self.asi8.view('m8[ms]') + NaT.to_datetime64()
return DatetimeArray(result)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value,
arr_mask=self._isnan)
result = self._maybe_mask_results(result)
dtype = DatetimeTZDtype(tz=other.tz) if other.tz else _NS_DTYPE
return DatetimeArray(result, dtype=dtype, freq=self.freq)
    def _addsub_offset_array(self, other, op):
        # Add or subtract Array-like of DateOffset objects
        try:
            # TimedeltaIndex can only operate with a subset of DateOffset
            # subclasses.  Incompatible classes will raise AttributeError,
            # which we re-raise as TypeError
            return super(TimedeltaArray, self)._addsub_offset_array(
                other, op
            )
        except AttributeError:
            raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}"
                            .format(cls=type(self).__name__))
    def __mul__(self, other):
        """Multiply by numeric scalar or array; freq is rescaled for scalars."""
        other = lib.item_from_zerodim(other)
        if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
            return NotImplemented
        if is_scalar(other):
            # numpy will accept float and int, raise TypeError for others
            result = self._data * other
            freq = None
            if self.freq is not None and not isna(other):
                freq = self.freq * other
            return type(self)(result, freq=freq)
        if not hasattr(other, "dtype"):
            # list, tuple
            other = np.array(other)
        if len(other) != len(self) and not is_timedelta64_dtype(other):
            # Exclude timedelta64 here so we correctly raise TypeError
            # for that instead of ValueError
            raise ValueError("Cannot multiply with unequal lengths")
        if is_object_dtype(other):
            # this multiplication will succeed only if all elements of other
            # are int or float scalars, so we will end up with
            # timedelta64[ns]-dtyped result
            result = [self[n] * other[n] for n in range(len(self))]
            result = np.array(result)
            return type(self)(result)
        # numpy will accept float or int dtype, raise TypeError for others
        result = self._data * other
        return type(self)(result)
    # Multiplication is commutative, so the reflected op is identical.
    __rmul__ = __mul__
    def __truediv__(self, other):
        # timedelta / X is well-defined for timedelta-like or numeric X:
        # dividing by a timedelta yields floats, dividing by a number
        # yields timedeltas.
        other = lib.item_from_zerodim(other)
        if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
            return NotImplemented
        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            other = Timedelta(other)
            if other is NaT:
                # specifically timedelta64-NaT
                result = np.empty(self.shape, dtype=np.float64)
                result.fill(np.nan)
                return result
            # otherwise, dispatch to Timedelta implementation
            return self._data / other
        elif lib.is_scalar(other):
            # assume it is numeric
            result = self._data / other
            freq = None
            if self.freq is not None:
                # Tick division is not implemented, so operate on Timedelta
                freq = self.freq.delta / other
            return type(self)(result, freq=freq)
        if not hasattr(other, "dtype"):
            # e.g. list, tuple
            other = np.array(other)
        if len(other) != len(self):
            raise ValueError("Cannot divide vectors with unequal lengths")
        elif is_timedelta64_dtype(other):
            # let numpy handle it
            return self._data / other
        elif is_object_dtype(other):
            # Note: we do not do type inference on the result, so either
            # an object array or numeric-dtyped (if numpy does inference)
            # will be returned.  GH#23829
            result = [self[n] / other[n] for n in range(len(self))]
            result = np.array(result)
            return result
        else:
            # Numeric array: elementwise division keeps timedelta dtype.
            result = self._data / other
            return type(self)(result)
    def __rtruediv__(self, other):
        # X / timedelta is defined only for timedelta-like X
        other = lib.item_from_zerodim(other)
        if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
            return NotImplemented
        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            other = Timedelta(other)
            if other is NaT:
                # specifically timedelta64-NaT
                result = np.empty(self.shape, dtype=np.float64)
                result.fill(np.nan)
                return result
            # otherwise, dispatch to Timedelta implementation
            return other / self._data
        elif lib.is_scalar(other):
            # A numeric scalar divided by a timedelta is undefined.
            raise TypeError("Cannot divide {typ} by {cls}"
                            .format(typ=type(other).__name__,
                                    cls=type(self).__name__))
        if not hasattr(other, "dtype"):
            # e.g. list, tuple
            other = np.array(other)
        if len(other) != len(self):
            raise ValueError("Cannot divide vectors with unequal lengths")
        elif is_timedelta64_dtype(other):
            # let numpy handle it
            return other / self._data
        elif is_object_dtype(other):
            # Note: unlike in __truediv__, we do not _need_ to do type
            # inference on the result.  It does not raise, a numeric array
            # is returned.  GH#23829
            result = [other[n] / self[n] for n in range(len(self))]
            return np.array(result)
        else:
            raise TypeError("Cannot divide {dtype} data by {cls}"
                            .format(dtype=other.dtype,
                                    cls=type(self).__name__))
    # Python 2 spells true division __div__.
    if compat.PY2:
        __div__ = __truediv__
        __rdiv__ = __rtruediv__
def __floordiv__(self, other):
    """
    Floor-divide this timedelta array by ``other`` elementwise.

    A timedelta-like divisor yields a numeric result (float64 when any
    NaT is involved); a numeric divisor scales the timedeltas and
    returns a new instance of this type.
    """
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    other = lib.item_from_zerodim(other)
    if is_scalar(other):
        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            other = Timedelta(other)
            if other is NaT:
                # treat this specifically as timedelta-NaT
                result = np.empty(self.shape, dtype=np.float64)
                result.fill(np.nan)
                return result

            # dispatch to Timedelta implementation
            result = other.__rfloordiv__(self._data)
            return result

        # at this point we should only have numeric scalars; anything
        # else will raise
        result = self.asi8 // other
        result[self._isnan] = iNaT
        freq = None
        if self.freq is not None:
            # Note: freq gets division, not floor-division
            freq = self.freq / other
        return type(self)(result.view('m8[ns]'), freq=freq)

    if not hasattr(other, "dtype"):
        # list, tuple
        other = np.array(other)
    if len(other) != len(self):
        raise ValueError("Cannot divide with unequal lengths")

    elif is_timedelta64_dtype(other):
        other = type(self)(other)

        # numpy timedelta64 does not natively support floordiv, so operate
        # on the i8 values
        result = self.asi8 // other.asi8
        mask = self._isnan | other._isnan
        if mask.any():
            # BUGFIX: int64 cannot hold NaN -- upcast to float64 before
            # masking; assigning np.nan into an int64 array raises
            # ValueError.
            result = result.astype(np.float64)
            result[mask] = np.nan
        return result

    elif is_object_dtype(other):
        result = [self[n] // other[n] for n in range(len(self))]
        result = np.array(result)
        if lib.infer_dtype(result, skipna=False) == 'timedelta':
            # elementwise division produced timedeltas (e.g. offsets
            # divisor); re-wrap in this array type
            result, _ = sequence_to_td64ns(result)
            return type(self)(result)
        return result

    elif is_integer_dtype(other) or is_float_dtype(other):
        result = self._data // other
        return type(self)(result)

    else:
        dtype = getattr(other, "dtype", type(other).__name__)
        raise TypeError("Cannot divide {typ} by {cls}"
                        .format(typ=dtype, cls=type(self).__name__))
def __rfloordiv__(self, other):
    """
    Floor-divide ``other`` by this timedelta array elementwise.

    Only defined for timedelta-like ``other``; numeric input raises
    TypeError.
    """
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    other = lib.item_from_zerodim(other)
    if is_scalar(other):
        if isinstance(other, (timedelta, np.timedelta64, Tick)):
            other = Timedelta(other)
            if other is NaT:
                # treat this specifically as timedelta-NaT
                result = np.empty(self.shape, dtype=np.float64)
                result.fill(np.nan)
                return result

            # dispatch to Timedelta implementation
            result = other.__floordiv__(self._data)
            return result

        raise TypeError("Cannot divide {typ} by {cls}"
                        .format(typ=type(other).__name__,
                                cls=type(self).__name__))

    if not hasattr(other, "dtype"):
        # list, tuple
        other = np.array(other)
    if len(other) != len(self):
        raise ValueError("Cannot divide with unequal lengths")

    elif is_timedelta64_dtype(other):
        other = type(self)(other)

        # numpy timedelta64 does not natively support floordiv, so operate
        # on the i8 values
        result = other.asi8 // self.asi8
        mask = self._isnan | other._isnan
        if mask.any():
            # BUGFIX: int64 cannot hold NaN -- upcast to float64 before
            # masking; assigning np.nan into an int64 array raises
            # ValueError.
            result = result.astype(np.float64)
            result[mask] = np.nan
        return result

    elif is_object_dtype(other):
        result = [other[n] // self[n] for n in range(len(self))]
        result = np.array(result)
        return result

    else:
        dtype = getattr(other, "dtype", type(other).__name__)
        raise TypeError("Cannot divide {typ} by {cls}"
                        .format(typ=dtype, cls=type(self).__name__))
def __mod__(self, other):
    """Elementwise modulo, computed as ``a - (a // b) * b``."""
    # Naive implementation; could be optimized if it ever matters.
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    divisor = lib.item_from_zerodim(other)
    if isinstance(divisor, (timedelta, np.timedelta64, Tick)):
        divisor = Timedelta(divisor)
    return self - (self // divisor) * divisor
def __rmod__(self, other):
    """Reflected modulo ``other % self``, via ``b - (b // a) * a``."""
    # Naive implementation; could be optimized if it ever matters.
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    dividend = lib.item_from_zerodim(other)
    if isinstance(dividend, (timedelta, np.timedelta64, Tick)):
        dividend = Timedelta(dividend)
    return dividend - (dividend // self) * self
def __divmod__(self, other):
    """Return ``(self // other, self % other)`` as a pair."""
    # Naive implementation; could be optimized if it ever matters.
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    divisor = lib.item_from_zerodim(other)
    if isinstance(divisor, (timedelta, np.timedelta64, Tick)):
        divisor = Timedelta(divisor)

    quotient = self // divisor
    remainder = self - quotient * divisor
    return quotient, remainder
def __rdivmod__(self, other):
    """Return ``(other // self, other % self)`` as a pair."""
    # Naive implementation; could be optimized if it ever matters.
    if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)):
        return NotImplemented

    dividend = lib.item_from_zerodim(other)
    if isinstance(dividend, (timedelta, np.timedelta64, Tick)):
        dividend = Timedelta(dividend)

    quotient = dividend // self
    remainder = dividend - quotient * self
    return quotient, remainder
# Note: TimedeltaIndex overrides this in call to cls._add_numeric_methods
def __neg__(self):
    """Elementwise negation; a set frequency is negated as well."""
    negated = -self._data
    if self.freq is None:
        return type(self)(negated)
    return type(self)(negated, freq=-self.freq)
def __abs__(self):
    """Elementwise absolute value.  Note: freq is not preserved."""
    # np.absolute is the canonical name of the ufunc np.abs aliases
    return type(self)(np.absolute(self._data))
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timedelta methods
def total_seconds(self):
    """
    Return total duration of each element expressed in seconds.

    This method is available directly on TimedeltaArray, TimedeltaIndex
    and on Series containing timedelta values under the ``.dt`` namespace.

    Returns
    -------
    seconds : [ndarray, Float64Index, Series]
        When the calling object is a TimedeltaArray, the return type
        is ndarray.  When the calling object is a TimedeltaIndex,
        the return type is a Float64Index.  When the calling object
        is a Series, the return type is Series of type `float64` whose
        index is the same as the original.

    See Also
    --------
    datetime.timedelta.total_seconds : Standard library version
        of this method.
    TimedeltaIndex.components : Return a DataFrame with components of
        each Timedelta.

    Examples
    --------
    **Series**

    >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
    >>> s
    0   0 days
    1   1 days
    2   2 days
    3   3 days
    4   4 days
    dtype: timedelta64[ns]

    >>> s.dt.total_seconds()
    0         0.0
    1     86400.0
    2    172800.0
    3    259200.0
    4    345600.0
    dtype: float64

    **TimedeltaIndex**

    >>> idx = pd.to_timedelta(np.arange(5), unit='d')
    >>> idx
    TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq=None)

    >>> idx.total_seconds()
    Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
                 dtype='float64')
    """
    # nanoseconds -> seconds; NaT slots are masked back to NaN
    return self._maybe_mask_results(self.asi8 * 1e-9, fill_value=None)
def to_pytimedelta(self):
    """
    Return Timedelta Array/Index as object ndarray of datetime.timedelta
    objects.

    Returns
    -------
    datetimes : ndarray
    """
    i8values = self.asi8
    return tslibs.ints_to_pytimedelta(i8values)
# Vectorized integer-component accessors; each returns the named field of
# every element (see _field_accessor for the machinery).
days = _field_accessor("days", "days",
                       "Number of days for each element.")
seconds = _field_accessor("seconds", "seconds",
                          "Number of seconds (>= 0 and less than 1 day) "
                          "for each element.")
microseconds = _field_accessor("microseconds", "microseconds",
                               "Number of microseconds (>= 0 and less "
                               "than 1 second) for each element.")
nanoseconds = _field_accessor("nanoseconds", "nanoseconds",
                              "Number of nanoseconds (>= 0 and less "
                              "than 1 microsecond) for each element.")
@property
def components(self):
    """
    Return a dataframe of the components (days, hours, minutes,
    seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.

    Returns
    -------
    a DataFrame
    """
    from pandas import DataFrame

    columns = ['days', 'hours', 'minutes', 'seconds',
               'milliseconds', 'microseconds', 'nanoseconds']
    has_missing = self._hasnans

    if has_missing:
        def extract(td):
            # NaT rows become all-NaN rows
            if isna(td):
                return [np.nan] * len(columns)
            return td.components
    else:
        def extract(td):
            return td.components

    frame = DataFrame([extract(td) for td in self], columns=columns)
    if not has_missing:
        # with no NaNs present the components are exact integers
        frame = frame.astype('int64')
    return frame
TimedeltaArray._add_comparison_ops()
# ---------------------------------------------------------------------
# Constructor Helpers
def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
    """
    Convert an arbitrary timedelta-like sequence to a timedelta64[ns] ndarray.

    Parameters
    ----------
    array : list-like
    copy : bool, default False
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    converted : numpy.ndarray
        The sequence converted to a numpy array with dtype ``timedelta64[ns]``.
    inferred_freq : Tick or None
        The inferred frequency of the sequence.

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, if setting ``errors=ignore`` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    inferred_freq = None
    unit = parse_timedelta_unit(unit)

    # Unwrap whatever we have into a np.ndarray
    if not hasattr(data, 'dtype'):
        # e.g. list, tuple
        if np.ndim(data) == 0:
            # i.e. generator
            data = list(data)
        data = np.array(data, copy=False)
    elif isinstance(data, ABCSeries):
        data = data._values
    elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)):
        # carry over the freq of an existing timedelta index/array
        inferred_freq = data.freq
        data = data._data

    # Convert whatever we have into timedelta64[ns] dtype.  Each branch
    # tracks whether the conversion already produced a fresh array so we
    # can skip the final defensive copy when possible.
    if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):
        # no need to make a copy, need to convert if string-dtyped
        data = objects_to_td64ns(data, unit=unit, errors=errors)
        copy = False

    elif is_integer_dtype(data.dtype):
        # treat as multiples of the given unit
        data, copy_made = ints_to_td64ns(data, unit=unit)
        copy = copy and not copy_made

    elif is_float_dtype(data.dtype):
        # treat as multiples of the given unit. If after converting to nanos,
        # there are fractional components left, these are truncated
        # (i.e. NOT rounded)
        mask = np.isnan(data)
        coeff = np.timedelta64(1, unit) / np.timedelta64(1, 'ns')
        data = (coeff * data).astype(np.int64).view('timedelta64[ns]')
        data[mask] = iNaT
        copy = False

    elif is_timedelta64_dtype(data.dtype):
        if data.dtype != _TD_DTYPE:
            # non-nano unit
            # TODO: watch out for overflows
            data = data.astype(_TD_DTYPE)
            copy = False

    elif is_datetime64_dtype(data):
        # GH#23539
        warnings.warn("Passing datetime64-dtype data to TimedeltaIndex is "
                      "deprecated, will raise a TypeError in a future "
                      "version",
                      FutureWarning, stacklevel=4)
        data = ensure_int64(data).view(_TD_DTYPE)

    else:
        raise TypeError("dtype {dtype} cannot be converted to timedelta64[ns]"
                        .format(dtype=data.dtype))

    data = np.array(data, copy=copy)
    if data.ndim != 1:
        raise ValueError("Only 1-dimensional input arrays are supported.")

    assert data.dtype == 'm8[ns]', data
    return data, inferred_freq
def ints_to_td64ns(data, unit="ns"):
    """
    Convert an integer-dtype ndarray to timedelta64[ns] dtype, treating
    the integers as multiples of the given timedelta unit.

    Parameters
    ----------
    data : numpy.ndarray with integer-dtype
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data
    bool : whether a copy was made
    """
    made_copy = False
    if unit is None:
        unit = "ns"

    if data.dtype != np.int64:
        # the int64 cast allocates a fresh array, so later steps need not
        # copy again
        data = data.astype(np.int64)
        made_copy = True

    if unit == "ns":
        # already nanoseconds -- reinterpret in place, no copy
        data = data.view("timedelta64[ns]")
    else:
        # reinterpret in the given unit, then convert to nanoseconds;
        # the astype allocates a fresh array.
        # TODO: watch out for overflows when converting from lower-resolution
        data = data.view("timedelta64[{unit}]".format(unit=unit))
        data = data.astype("timedelta64[ns]")
        made_copy = True

    return data, made_copy
def objects_to_td64ns(data, unit="ns", errors="raise"):
    """
    Convert an object-dtyped or string-dtyped array into a
    timedelta64[ns]-dtyped array.

    Parameters
    ----------
    data : ndarray or Index
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.
    errors : {"raise", "coerce", "ignore"}, default "raise"
        How to handle elements that cannot be converted to timedelta64[ns].
        See ``pandas.to_timedelta`` for details.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data

    Raises
    ------
    ValueError : Data cannot be converted to timedelta64[ns].

    Notes
    -----
    Unlike `pandas.to_timedelta`, if setting `errors=ignore` will not cause
    errors to be ignored; they are caught and subsequently ignored at a
    higher level.
    """
    # coerce Index to np.ndarray, converting string-dtype if necessary
    as_objects = np.asarray(data, dtype=np.object_)
    converted = array_to_timedelta64(as_objects, unit=unit, errors=errors)
    return converted.view('timedelta64[ns]')
def _validate_td64_dtype(dtype):
    """
    Normalize ``dtype`` and require it to be exactly timedelta64[ns].

    A unit-less ``timedelta64`` is accepted for now (coerced to nanoseconds
    with a deprecation warning); anything else raises ValueError.
    """
    dtype = pandas_dtype(dtype)
    if is_dtype_equal(dtype, np.dtype("timedelta64")):
        # unit-less timedelta64: coerce to nanoseconds, but warn
        dtype = _TD_DTYPE
        msg = textwrap.dedent("""\
            Passing in 'timedelta' dtype with no precision is deprecated
            and will raise in a future version. Please pass in
            'timedelta64[ns]' instead.""")
        warnings.warn(msg, FutureWarning, stacklevel=4)

    if not is_dtype_equal(dtype, _TD_DTYPE):
        raise ValueError(_BAD_DTYPE.format(dtype=dtype))

    return dtype
def _generate_regular_range(start, end, periods, offset):
    """
    Build the int64 (nanosecond) values of an evenly-spaced timedelta
    range, given any two of start / end / periods plus a Tick offset.
    """
    stride = offset.nanos
    if periods is None:
        # both endpoints given; round the (exclusive) end up to a
        # stride boundary
        first = Timedelta(start).value
        last = Timedelta(end).value
        last += stride - last % stride
    elif start is not None:
        first = Timedelta(start).value
        last = first + periods * stride
    elif end is not None:
        last = Timedelta(end).value + stride
        first = last - periods * stride
    else:
        raise ValueError("at least 'start' or 'end' should be specified "
                         "if a 'period' is given.")

    return np.arange(first, last, stride, dtype=np.int64)
| {
"content_hash": "bf53d2fcad3dee85dd39ad95bd28c549",
"timestamp": "",
"source": "github",
"line_count": 1065,
"max_line_length": 79,
"avg_line_length": 34.91830985915493,
"alnum_prop": 0.5669033021404755,
"repo_name": "MJuddBooth/pandas",
"id": "74fe8072e692410839c2c8c1170489c60210424e",
"size": "37212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/arrays/timedeltas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
import logging
logging.basicConfig()
import av
from av.codec import CodecContext, CodecParser
from av.video import VideoFrame
from av.packet import Packet
cc = CodecContext.create('mpeg4', 'r')
print(cc)

frame_count = 0
# BUGFIX: the file must be opened in binary mode ('rb') -- the parser
# consumes raw bytes, and text mode would try to decode the MPEG-4 stream
# as text.  A `with` block also guarantees the handle is closed.
with open('test.mp4', 'rb') as fh:
    while True:
        chunk = fh.read(819200)
        # feed None at EOF so the parser flushes any buffered packets
        for packet in cc.parse(chunk or None, allow_stream=True):
            print(packet)
            for frame in cc.decode(packet) or ():
                print(frame)
                img = frame.to_image()
                img.save('sandbox/test.%04d.jpg' % frame_count)
                frame_count += 1
        if not chunk:
            break  # EOF!
| {
"content_hash": "58c99010e71eca496afd9307dea9df05",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 61,
"avg_line_length": 19.25,
"alnum_prop": 0.6217532467532467,
"repo_name": "PyAV-Org/PyAV",
"id": "bdb6724f4186cbcf8a03961a766d9e9e13345078",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scratchpad/cctx_decode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "313993"
},
{
"name": "Makefile",
"bytes": "1819"
},
{
"name": "Python",
"bytes": "228269"
},
{
"name": "Shell",
"bytes": "6549"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# encoding: UTF-8
import unittest
import pytest
from six import iteritems
from mock import patch
from tests.common import FakeUser
from tml.strings import to_string
from tml.token.transform import TransformToken, Error, PIPE_CHAR, DBL_PIPE_CHAR
import tml.token.data
@pytest.mark.usefixtures("init_app")
class TransformTokenTest(unittest.TestCase):
    """Tests for TransformToken parsing and gender/number substitution.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias is
    deprecated and was removed in Python 3.12.
    """

    def setUp(self):
        self.app = self.init_app()
        self.en = self.app.language('en')
        self.ru = self.app.language('ru')
        self.label = "This is {user|he,she,it}"
        self.token = TransformToken.parse(self.label)[0]

    def test_parse(self):
        # single-pipe token: value is implied, not displayed
        self.assertIsInstance(self.token, TransformToken)
        self.assertEqual(self.token.short_name, 'user')
        self.assertEqual(self.token.context_keys, [])
        self.assertEqual(self.token.case_keys, [])
        self.assertEqual(self.token.piped_params, 'he,she,it')
        self.assertEqual(self.token.pipe_separator, PIPE_CHAR)
        self.assertFalse(self.token.token_value_displayed())
        self.assertTrue(self.token.is_implied())

    def test_misc(self):
        # double-pipe token: value is displayed before the piped params
        token = TransformToken.parse("This is {user||he,she,it}")[0]
        self.assertEqual(token.piped_params, 'he,she,it')
        self.assertEqual(token.pipe_separator, DBL_PIPE_CHAR)
        self.assertTrue(token.token_value_displayed())
        self.assertFalse(token.is_implied())

    @patch('tml.token.data.is_language_cases_enabled', return_value=True)
    def test_complete_substitution(self, _):
        # label -> list of (substitution context, expected rendering)
        case_dict = {
            'This is {user|he,she,it}': [
                ({'user': FakeUser(gender='male')}, 'This is he'),
                ({'user': FakeUser(gender='female')}, 'This is she'),
                ({'user': FakeUser(gender='other')}, 'This is it')],
            'This is {user||he,she,it}': [
                ({'user': FakeUser(gender='male')}, 'This is Tom Anderson he'),
                ({'user': FakeUser(gender='female')}, 'This is Tom Anderson she'),
                ({'user': FakeUser(gender='other')}, 'This is Tom Anderson it')],
            'This is {user|male: he, female: she, other: he/she}': [
                ({'user': FakeUser(gender='male')}, 'This is he'),
                ({'user': FakeUser(gender='female')}, 'This is she'),
                ({'user': FakeUser(gender='other')}, 'This is he/she')],
            '{count||message}': [
                ({'count': 1}, '1 message'),
                ({'count': 2}, '2 messages'),],
        }
        for label, cases in iteritems(case_dict):
            token = TransformToken.parse(label)[0]
            for context, expected in cases:
                self.assertEqual(token.substitute(label, context, self.en),
                                 expected)
| {
"content_hash": "72a3844a97c62baa59a4949aa3ae5b0e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 85,
"avg_line_length": 42.76923076923077,
"alnum_prop": 0.5946043165467626,
"repo_name": "translationexchange/tml-python",
"id": "7210aeb5c90b0e408e5cdfe7ad4f585439b31a4f",
"size": "2780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/token/transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "446575"
},
{
"name": "Shell",
"bytes": "294"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class Record(models.Model):
    """An uploaded audio recording and its upload/conversion state."""

    # on_delete=CASCADE is the implicit default in Django < 2.0 and is
    # required from Django 2.0 on, so making it explicit is behavior-neutral.
    user = models.ForeignKey(User, verbose_name='User',
                             on_delete=models.CASCADE)
    title = models.CharField(verbose_name='Title', default='', max_length=255)
    duration = models.CharField(verbose_name='Duration', default='00:00', max_length=100)
    filename = models.CharField(verbose_name='Filename', default=None, max_length=255)
    file = models.FileField(verbose_name='File', default=None)
    text = models.TextField(verbose_name='Text', default=None, null=True, blank=True)
    is_korean = models.BooleanField(default=False)
    is_uploaded = models.BooleanField(default=False)
    is_converted = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    @property
    def datetime(self):
        """Creation time formatted like "2017-01-05 3:07PM".

        BUGFIX: minutes must use zero-padded ``%M`` -- the original
        ``%-M`` produced e.g. "3:7PM" for 15:07.  ``%-I`` (non-padded
        hour) is a glibc extension and is not portable to Windows.
        """
        return self.created.strftime("%Y-%m-%d %-I:%M%p")
| {
"content_hash": "e46f834383fb682a6a8beba5324b831a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 89,
"avg_line_length": 42.09090909090909,
"alnum_prop": 0.7170626349892009,
"repo_name": "lecrec/backend",
"id": "1019dad465545760ac031f9088d76de98aadeb20",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lecrec/api/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24388"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from saharaclient.api import base as sahara_base
from rally import consts
from rally import exceptions
from rally.plugins.openstack.scenarios.sahara import utils
from tests.unit import test
CONF = cfg.CONF
SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"
class SaharaScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the Sahara scenario helper methods in ``utils``."""

    # NOTE(stpierre): the Sahara utils generally do funny stuff with
    # wait_for() calls -- frequently the is_ready and
    # update_resource arguments are functions defined in the Sahara
    # utils themselves instead of the more standard resource_is() and
    # get_from_manager() calls. As a result, the tests below do more
    # integrated/functional testing of wait_for() calls, and we can't
    # just mock out wait_for and friends the way we usually do.
    patch_benchmark_utils = False

    def setUp(self):
        super(SaharaScenarioTestCase, self).setUp()

        # zero the polling intervals so wait_for() loops return immediately
        CONF.set_override("sahara_cluster_check_interval", 0, "benchmark")
        CONF.set_override("sahara_job_check_interval", 0, "benchmark")

    def test_list_node_group_templates(self):
        ngts = []
        self.clients("sahara").node_group_templates.list.return_value = ngts

        scenario = utils.SaharaScenario(self.context)
        return_ngts_list = scenario._list_node_group_templates()

        self.assertEqual(ngts, return_ngts_list)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.list_node_group_templates")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_create_node_group_templates(
            self, mock_sahara_consts,
            mock__generate_random_name):

        scenario = utils.SaharaScenario(self.context)
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_sahara_consts.NODE_PROCESSES = mock_processes

        scenario._create_master_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )
        scenario._create_worker_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )

        # one create call per node group template, master first
        create_calls = [
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p1"]),
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p2"]
            )]
        self.clients("sahara").node_group_templates.create.assert_has_calls(
            create_calls)

        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_master_node_group_template")
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_worker_node_group_template")

    def test_delete_node_group_templates(self):
        scenario = utils.SaharaScenario(self.context)
        ng = mock.MagicMock(id=42)

        scenario._delete_node_group_template(ng)

        delete_mock = self.clients("sahara").node_group_templates.delete
        delete_mock.assert_called_once_with(42)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_node_group_template")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_launch_cluster(self, mock_sahara_consts,
                            mock__generate_random_name):

        self.context.update({
            "tenant": {
                "networks": [
                    {
                        "id": "test_neutron_id",
                        "router_id": "test_router_id"
                    }
                ]
            }
        })

        self.clients("services").values.return_value = [
            consts.Service.NEUTRON
        ]

        scenario = utils.SaharaScenario(context=self.context)

        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        floating_ip_pool_uuid = uuidutils.generate_uuid()
        # the node groups we expect _launch_cluster to build internally
        node_groups = [
            {
                "name": "master-ng",
                "flavor_id": "test_flavor",
                "node_processes": ["p1"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "volumes_per_node": 5,
                "volumes_size": 10,
                "count": 1,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
            }, {
                "name": "worker-ng",
                "flavor_id": "test_flavor",
                "node_processes": ["p2"],
                "floating_ip_pool": floating_ip_pool_uuid,
                "volumes_per_node": 5,
                "volumes_size": 10,
                "count": 42,
                "auto_security_group": True,
                "security_groups": ["g1", "g2"],
                "node_configs": {"HDFS": {"local_config": "local_value"}},
            }
        ]

        mock_sahara_consts.NODE_PROCESSES = mock_processes
        mock_sahara_consts.REPLICATION_CONFIGS = mock_configs

        self.clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="active")

        scenario._launch_cluster(
            plugin_name="test_plugin",
            hadoop_version="test_version",
            flavor_id="test_flavor",
            image_id="test_image",
            floating_ip_pool=floating_ip_pool_uuid,
            volumes_per_node=5,
            volumes_size=10,
            auto_security_group=True,
            security_groups=["g1", "g2"],
            workers_count=42,
            node_configs={"HDFS": {"local_config": "local_value"}}
        )

        self.clients("sahara").clusters.create.assert_called_once_with(
            name="random_name",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            node_groups=node_groups,
            default_image_id="test_image",
            cluster_configs={"HDFS": {"dfs.replication": 3}},
            net_id="test_neutron_id",
            anti_affinity=None
        )

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.launch_cluster")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
                return_value="random_name")
    @mock.patch(SAHARA_UTILS + ".sahara_consts")
    def test_launch_cluster_error(self, mock_sahara_consts,
                                  mock__generate_random_name):

        scenario = utils.SaharaScenario(self.context)
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        mock_sahara_consts.NODE_PROCESSES = mock_processes
        mock_sahara_consts.REPLICATION_CONFIGS = mock_configs

        self.clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        # a cluster stuck in "error" status must surface as an exception
        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="error")

        self.assertRaises(exceptions.GetResourceErrorStatus,
                          scenario._launch_cluster,
                          plugin_name="test_plugin",
                          hadoop_version="test_version",
                          flavor_id="test_flavor",
                          image_id="test_image",
                          floating_ip_pool="test_pool",
                          volumes_per_node=5,
                          volumes_size=10,
                          workers_count=42,
                          node_configs={"HDFS": {"local_config":
                                                 "local_value"}})

    def test_scale_cluster(self):
        scenario = utils.SaharaScenario(self.context)
        cluster = mock.MagicMock(id=42, node_groups=[{
            "name": "random_master",
            "count": 1
        }, {
            "name": "random_worker",
            "count": 41
        }])

        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            id=42,
            status="active")

        # scaling by +1 should resize only the worker group: 41 -> 42
        expected_scale_object = {
            "resize_node_groups": [{
                "name": "random_worker",
                "count": 42
            }]
        }

        scenario._scale_cluster(cluster, 1)
        self.clients("sahara").clusters.scale.assert_called_once_with(
            42, expected_scale_object)

    def test_delete_cluster(self):
        scenario = utils.SaharaScenario(self.context)
        cluster = mock.MagicMock(id=42)

        # first get() returns the cluster, second raises -> treated as gone
        self.clients("sahara").clusters.get.side_effect = [
            cluster, sahara_base.APIException()
        ]

        scenario._delete_cluster(cluster)
        delete_mock = self.clients("sahara").clusters.delete
        delete_mock.assert_called_once_with(42)

        cl_get_expected = mock.call(42)
        self.clients("sahara").clusters.get.assert_has_calls([cl_get_expected,
                                                              cl_get_expected])

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_cluster")

    @mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
                return_value="42")
    def test_create_output_ds(self, mock__generate_random_name):
        self.context.update({
            "sahara_output_conf": {
                "output_type": "hdfs",
                "output_url_prefix": "hdfs://test_out/"
            }
        })

        scenario = utils.SaharaScenario(self.context)
        scenario._create_output_ds()

        self.clients("sahara").data_sources.create.assert_called_once_with(
            name="42",
            description="",
            data_source_type="hdfs",
            url="hdfs://test_out/42"
        )

    @mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
                return_value="42")
    def test_create_output_ds_swift(self, mock__generate_random_name):
        self.context.update({
            "sahara_output_conf": {
                "output_type": "swift",
                "output_url_prefix": "swift://test_out/"
            }
        })

        # swift output is unsupported and must raise
        scenario = utils.SaharaScenario(self.context)
        self.assertRaises(exceptions.RallyException,
                          scenario._create_output_ds)

    def test_run_job_execution(self):
        # job polls "pending" once, then finishes with "SUCCESS"
        self.clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "SUCCESS"}, id="42")]

        self.clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario(self.context)
        scenario._run_job_execution(job_id="test_job_id",
                                    cluster_id="test_cluster_id",
                                    input_id="test_input_id",
                                    output_id="test_output_id",
                                    configs={"k": "v"},
                                    job_idx=0)

        self.clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"}
        )

        je_get_expected = mock.call("42")
        self.clients("sahara").job_executions.get.assert_has_calls(
            [je_get_expected, je_get_expected]
        )

    def test_run_job_execution_fail(self):
        # job polls "pending" once, then ends up "killed" -> exception
        self.clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "killed"}, id="42")]

        self.clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario(self.context)
        self.assertRaises(exceptions.RallyException,
                          scenario._run_job_execution,
                          job_id="test_job_id",
                          cluster_id="test_cluster_id",
                          input_id="test_input_id",
                          output_id="test_output_id",
                          configs={"k": "v"},
                          job_idx=0)

        self.clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"}
        )
| {
"content_hash": "85633edad4f05dfcd5025e53f1514c56",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 79,
"avg_line_length": 36.207692307692305,
"alnum_prop": 0.516960555201473,
"repo_name": "cernops/rally",
"id": "c332294e4020a52640404819ad9d593c9e611d02",
"size": "14751",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/sahara/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "48262"
},
{
"name": "Python",
"bytes": "2726491"
},
{
"name": "Shell",
"bytes": "43920"
}
],
"symlink_target": ""
} |
from pygal import Line
def make_chart(values, seulav, title="It Works!",
               output_path='fwt/static/img/todate.svg'):
    """
    Render a line chart of weight readings and their trend to an SVG file.

    Parameters
    ----------
    values : sequence of numbers
        Raw weight readings, plotted as "Weight (lbs)".
    seulav : sequence of numbers
        Trend values, plotted as "Weight Trend".
    title : str, optional
        Chart title; defaults to the previously hard-coded value.
    output_path : str, optional
        Destination SVG path; defaults to the previously hard-coded path.

    Returns
    -------
    pygal.Line
        The configured chart object (already rendered to ``output_path``).
    """
    chart = Line(title=title)
    chart.add("Weight (lbs)", values)
    chart.add("Weight Trend", seulav)
    chart.render_to_file(output_path)
    return chart
| {
"content_hash": "813120469c6bf4b135fd7a641291249b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6518218623481782,
"repo_name": "fgandiya/fwt",
"id": "725d7d0ac3b607a5766f4d50b654da2c50fa0569",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fwt/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1887"
},
{
"name": "JavaScript",
"bytes": "11966"
},
{
"name": "Makefile",
"bytes": "133"
},
{
"name": "Python",
"bytes": "6720"
}
],
"symlink_target": ""
} |
import re
import unittest
from flask import url_for
from app import create_app, db
from app.models import User, Role
class FlaskClientTestCase(unittest.TestCase):
    """Functional tests that drive the app through the Flask test client."""

    def setUp(self):
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()
        self.client = self.app.test_client(use_cookies=True)

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_home_page(self):
        response = self.client.get(url_for('main.index'))
        self.assertTrue('Stranger' in response.get_data(as_text=True))

    def test_register_and_login(self):
        # register a new account
        response = self.client.post(url_for('auth.register'), data={
            'email': 'john@example.com',
            'username': 'john',
            'password': 'cat',
            'password2': 'cat'
        })
        # successful registration redirects to the login page
        self.assertEqual(302, response.status_code)

        # login with the new account
        response = self.client.post(url_for('auth.login'), data={
            'email': 'john@example.com',
            'password': 'cat'
        }, follow_redirects=True)
        data = response.get_data(as_text=True)
        # raw string: \s is a regex escape, not a (deprecated) string escape
        self.assertTrue(re.search(r'Hello,\s+john!', data))
        self.assertTrue('You have not confirmed your account yet' in data)

        # send a confirmation token
        user = User.query.filter_by(email='john@example.com').first()
        token = user.generate_confirmation_token()
        response = self.client.get(url_for('auth.confirm', token=token),
                                   follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue('You have confirmed your account' in data)

        # log out
        response = self.client.get(url_for('auth.logout'),
                                   follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue('You have been logged out' in data)
"content_hash": "7f507428e187e8cf6446d14eb362c41b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 36.660714285714285,
"alnum_prop": 0.5986361422308817,
"repo_name": "Tuklab/tuklab101",
"id": "892f7c05a99a85d154f3320dafa2321307c85c71",
"size": "2053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "16885"
},
{
"name": "Makefile",
"bytes": "413"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "79248"
},
{
"name": "Shell",
"bytes": "56705"
}
],
"symlink_target": ""
} |
import sys
import boto3
import json
import base64
class colors:
    # ANSI terminal escape codes used to colorize the test-result output.
    SUCCESS = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset attributes back to the terminal default
# CLI arguments: the security group to inspect and the number of ingress
# rule entries the chaos experiment is expected to have produced.
securityGroupId = sys.argv[1]
numberOfChangesExpected = sys.argv[2]  # kept as str; converted with int() below
# All AWS API calls go through the dedicated ChaoSlingr profile.
boto3.setup_default_session(profile_name='ChaoSlingr')
client = boto3.client('ec2')
print("Validating PortChange_Generatr for security group " + securityGroupId)
response = client.describe_security_groups(GroupIds=[securityGroupId])
changeFound = False
# Pass when the group exists and its ingress-rule count matches expectation.
if 'SecurityGroups' in response and 'IpPermissions' in response['SecurityGroups'][0] and len(response['SecurityGroups'][0]['IpPermissions']) == int(numberOfChangesExpected):
    changeFound = True
if changeFound:
    print(colors.SUCCESS + "***Test successful.***" + colors.ENDC)
else:
    print(colors.FAIL + "***Test FAILED.***" + colors.ENDC)
| {
"content_hash": "1c120a96984aff732832bca88e9522f7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 173,
"avg_line_length": 34.416666666666664,
"alnum_prop": 0.7263922518159807,
"repo_name": "Optum/ChaoSlingr",
"id": "eb5f0fd7152af4645306a3721dc67821658e6652",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/PortChange_Generatr/verify-lambda.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "2234"
},
{
"name": "HCL",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "10765"
},
{
"name": "Shell",
"bytes": "4090"
}
],
"symlink_target": ""
} |
'''textfilestreamlet.py: module defining a streamlet based on TextFileSpout'''
import glob
from heron.dsl.src.python.streamlet import Streamlet
from heron.dsl.src.python.operation import OperationType
from heron.spouts.src.python.textfiles.textfilespout import TextFileSpout
# pylint: disable=access-member-before-definition
# pylint: disable=attribute-defined-outside-init
class TextFileStreamlet(Streamlet):
  """A TextFileStreamlet is a list of text input files
  """
  def __init__(self, filepattern, stage_name=None, parallelism=None):
    """Expand ``filepattern`` (glob syntax) into the list of input files."""
    super(TextFileStreamlet, self).__init__(parents=[], operation=OperationType.Input,
                                            stage_name=stage_name,
                                            parallelism=parallelism)
    self._files = glob.glob(filepattern)
  @staticmethod
  def textFile(filepattern, stage_name=None, parallelism=None):
    """Factory helper mirroring the constructor."""
    return TextFileStreamlet(filepattern, stage_name=stage_name, parallelism=parallelism)
  # pylint: disable=no-self-use
  def _calculate_inputs(self):
    # An input streamlet has no upstream stages.
    return {}
  def _calculate_parallelism(self):
    # One spout instance per matched file.
    return len(self._files)
  # pylint: disable=no-self-use
  def _calculate_stage_name(self, existing_stage_names):
    """Return a stage name of the form ``textfileinput[-N]`` not yet in use."""
    funcname = "textfileinput"
    if funcname not in existing_stage_names:
      return funcname
    index = 1
    newfuncname = funcname + "-" + str(index)
    while newfuncname in existing_stage_names:
      index = index + 1
      # Bug fix: the original dropped the "-" separator on retries, yielding
      # names like "textfileinput2" inconsistent with "textfileinput-1".
      newfuncname = funcname + "-" + str(index)
    return newfuncname
  def _build_this(self, bldr):
    """Register the TextFileSpout for this stage on the topology builder."""
    bldr.add_spout(self._stage_name, TextFileSpout, par=self._parallelism,
                   config={TextFileSpout.FILES : self._files})
| {
"content_hash": "8fe8668c503c2a89f56777098a472211",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 89,
"avg_line_length": 37.71111111111111,
"alnum_prop": 0.6882734236888627,
"repo_name": "srkukarni/heron",
"id": "5762078a3a93b843134818a1623e3d16f16ab1e8",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heron/spouts/src/python/textfiles/textfilestreamlet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4710"
},
{
"name": "C++",
"bytes": "1315767"
},
{
"name": "CSS",
"bytes": "109095"
},
{
"name": "HTML",
"bytes": "154389"
},
{
"name": "Java",
"bytes": "3525682"
},
{
"name": "JavaScript",
"bytes": "167652"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "517"
},
{
"name": "Objective-C",
"bytes": "1929"
},
{
"name": "Protocol Buffer",
"bytes": "31644"
},
{
"name": "Python",
"bytes": "1382264"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "4640"
},
{
"name": "Shell",
"bytes": "157691"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
} |
def dismiss(user, keys):
    """
    Dismiss a hint. Expects parameters ``user`` as a user object
    and ``keys`` as an iterable.
    """
    from zenaida.contrib.hints.models import Dismissed
    # Hints are stored keyed by the concatenation of their key parts.
    return Dismissed.objects.create(user=user, key="".join(keys))
| {
"content_hash": "c0be43b257b1cc8531f0b037c4bd9c4b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 26.272727272727273,
"alnum_prop": 0.6643598615916955,
"repo_name": "littleweaver/django-zenaida",
"id": "baa9461551aa93ee6333efdedc930f941f1adc06",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zenaida/contrib/hints/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "252460"
},
{
"name": "HTML",
"bytes": "199106"
},
{
"name": "JavaScript",
"bytes": "159393"
},
{
"name": "Python",
"bytes": "26214"
},
{
"name": "Ruby",
"bytes": "1130"
}
],
"symlink_target": ""
} |
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-d] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-d, --disable_fields
don't show the class member fields.
"""
__version__ = "0.8"
__svnid__ = "$Id: modelviz.py 1 2008-04-26 08:43:39Z esimorre@free.fr $"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
]
import getopt, sys
from django.core.management import setup_environ
# Bootstrap Django when a settings module is importable from the current
# directory; otherwise rely on DJANGO_SETTINGS_MODULE being set already.
try:
    import settings
except ImportError:
    pass
else:
    setup_environ(settings)
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% for model in models %}
{% for relation in model.relations %}
{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{{ model.name }} -> {{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
{% for model in models %}
{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
    """Return a Graphviz .dot description of the models in *app_labels*.

    Keyword args:
        disable_fields: when True, omit the member-field rows from each node.
    """
    disable_fields = kwargs.get('disable_fields', False)
    dot = head_template
    for app_label in app_labels:
        app = models.get_app(app_label)
        graph = Context({
            'name': '"%s"' % app.__name__,
            'disable_fields': disable_fields,
            'models': []
        })
        for appmodel in get_models(app):
            model = {
                'name': appmodel.__name__,
                'fields': [],
                'relations': []
            }
            # model attributes
            # NOTE: add_attributes deliberately reads the loop variable
            # ``field`` from the enclosing scope (late binding); it is only
            # ever called from inside the field loops below.
            def add_attributes():
                model['fields'].append({
                    'name': field.name,
                    'type': type(field).__name__,
                    'blank': field.blank
                })
            for field in appmodel._meta.fields:
                add_attributes()
            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    add_attributes()
            # relations
            # add_relation likewise closes over ``field``; ``extras`` carries
            # the Graphviz arrowhead styling for the edge, and duplicates
            # are filtered out before appending.
            def add_relation(extras=""):
                _rel = {
                    'target': field.rel.to.__name__,
                    'type': type(field).__name__,
                    'name': field.name,
                    'arrows': extras
                }
                if _rel not in model['relations']:
                    model['relations'].append(_rel)
            for field in appmodel._meta.fields:
                if isinstance(field, ForeignKey):
                    add_relation()
                elif isinstance(field, OneToOneField):
                    add_relation("[arrowhead=none arrowtail=none]")
            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    if isinstance(field, ManyToManyField):
                        add_relation("[arrowhead=normal arrowtail=normal]")
                    elif isinstance(field, GenericRelation):
                        add_relation(
                            '[style="dotted"] [arrowhead=normal arrowtail=normal]')
            graph['models'].append(model)
        # Render this app's models and append them to the accumulated digraph.
        t = Template(body_template)
        dot += '\n' + t.render(graph)
    dot += '\n' + tail_template
    return dot
def main():
    """Parse CLI options and print the generated .dot source to stdout."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd",
                    ["help", "disable_fields"])
    # Python 2 except syntax -- this script predates Python 3.
    except getopt.GetoptError, error:
        print __doc__
        sys.exit(error)
    else:
        # At least one app label is required.
        if not args:
            print __doc__
            sys.exit()
    kwargs = {}
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print __doc__
            sys.exit()
        if opt in ("-d", "--disable_fields"):
            kwargs['disable_fields'] = True
    print generate_dot(args, **kwargs)
if __name__ == "__main__":
    main()
| {
"content_hash": "95c0c94924e4d67b887630cacce1c3f1",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 106,
"avg_line_length": 29.11,
"alnum_prop": 0.5398488491927173,
"repo_name": "glasslion/djflow",
"id": "f2e96e1a59707dcc9ad392335a15b1d0838fc068",
"size": "5844",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leavedemo/modelviz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "179"
},
{
"name": "CSS",
"bytes": "2056"
},
{
"name": "HTML",
"bytes": "88891"
},
{
"name": "JavaScript",
"bytes": "80177"
},
{
"name": "Python",
"bytes": "134076"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
} |
import sys
import os
import time
class Monitor:
    """Check the freshness of a sync log file.

    Reads the timestamp on the last line of the log and maps its age onto a
    Nagios-style exit status: 0 = OK, 1 = warning, 2 = error. Configuration
    problems and I/O failures print a message and ``sys.exit(2)``.
    """

    def __init__(self, path: str, warningThreshold: int, ErrorThreshold: int):
        """Store the log path and the warning/error age thresholds (seconds)."""
        self.path = path
        self.warningThreshold = warningThreshold
        self.errorThreshold = ErrorThreshold
        # Bytes stepped back from EOF while scanning for the last line start.
        self.seekbuffer = 1

    def __check__thresholds(self):
        # Sanity-check the configuration before doing any work.
        if self.warningThreshold > self.errorThreshold:
            print("Warning threshold must be less than error threshold")
            sys.exit(2)

    def __checklogexists(self):
        # Open the log and record its size so we can seek backwards from EOF.
        try:
            self.f = open(self.path)
            self.seekto = os.stat(self.path)[6]  # index 6 == st_size
        except OSError:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt.
            print("I/O error on file", self.path, ".Check if file exists and has right permission for the script to read")
            sys.exit(2)

    def __get__last__line(self):
        """Scan backwards until two newlines are found, then read the final
        line into ``self.lastline`` and close the file."""
        try:
            self.f.seek((self.seekto - self.seekbuffer), 0)
        except (OSError, ValueError):
            print("Negative seek, empty log file. Possibly the check coincided when the log file was rotated, or the daemon is dead.")
            sys.exit(2)
        line = None
        counter = 0
        while True:
            byte = self.f.read(1)
            if byte == "\n" and counter == 1:
                # Second newline from the end: everything after it is the
                # last complete line.
                line = self.f.readlines()
                break
            elif byte == "\n":
                counter += 1
                self.seekbuffer += 1
                self.f.seek((self.seekto - self.seekbuffer), 0)
            else:
                self.seekbuffer += 1
                self.f.seek((self.seekto - self.seekbuffer), 0)
        self.lastline = line
        # Fix: release the file handle -- nothing reads from it after this.
        self.f.close()

    def __get__time(self):
        # Extract "YYYY/MM/DD HH:MM:SS" from fields 7-8 of the last line.
        try:
            datetime = self.lastline[0].split(" ")
            self.timestamp = " ".join([datetime[6], datetime[7]])
            self.pattern = '%Y/%m/%d %H:%M:%S'
        except (TypeError, IndexError):
            # TypeError: lastline is None; IndexError: too few fields.
            print("Error finding time from the log line. This could be a bug. Please getback")
            sys.exit(2)

    def __get__epoch(self):
        # Convert the parsed timestamp and "now" to integer epoch seconds.
        self.LogTimeEpoch = int(time.mktime(time.strptime(self.timestamp, self.pattern)))
        self.CurrentTimeEpoch = int(time.time())

    def __generate_alert(self):
        """Set ``self.Exitstatus`` from the log age.

        0 when age < warning, 1 when warning <= age < error, 2 otherwise.
        """
        timediff = self.CurrentTimeEpoch - self.LogTimeEpoch
        if timediff < self.warningThreshold:
            self.Exitstatus = 0
        elif timediff < self.errorThreshold:
            # Bug fix: an age exactly equal to warningThreshold previously
            # fell through to the error branch; it now reports a warning.
            self.Exitstatus = 1
        else:
            self.Exitstatus = 2

    def init(self):
        """Run the full check pipeline, leaving ``self.Exitstatus`` set."""
        self.__check__thresholds()
        self.__checklogexists()
        self.__get__last__line()
        self.__get__time()
        self.__get__epoch()
        self.__generate_alert()

    def toString(self):
        """Run the check, then dump all instance attributes (debug aid)."""
        self.init()
        attrs = vars(self)
        print(attrs.items())
| {
"content_hash": "19cb65a033d4bee4b9d519eb6341d02c",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 134,
"avg_line_length": 33.32098765432099,
"alnum_prop": 0.5505742867728789,
"repo_name": "nohupped/ADtoLDAP",
"id": "68b589e58404003967b9f1494f839700b8183b6c",
"size": "2718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LdapSyncMonitor/SyncMonitor/Monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6331"
},
{
"name": "Go",
"bytes": "31762"
},
{
"name": "Python",
"bytes": "3630"
}
],
"symlink_target": ""
} |
import time
def rebuild(self):
    """Rebuild every entity of the model in dependency order: grids and
    coordinate systems first (they reference each other), then properties,
    elements, subcases, loads and finally constraints. Progress is printed
    with timestamps (Python 2 print statements).
    """
    #########################################################
    # Grid <--> CoordSys loops
    print time.ctime() + ' started - rebuilding grids and coordsys'
    # first processing basic grids and coordsys
    for grid in self.griddict.values():
        if grid.rcid == 0 or grid.rcid == '':
            grid.rebuild()
    # NOTE(review): ``loop`` is always True, so the block below always runs;
    # looks like a leftover debug switch -- confirm before removing.
    loop = True
    if loop:
        all_done = 0
        # Fixed-point iteration: stop after three consecutive passes in which
        # nothing was rebuilt (any rebuild resets the counter to 0).
        while all_done < 3:
            all_done += 1
            for grid in self.griddict.values():
                if not grid.rebuilded:
                    if grid.check_to_rebuild():
                        grid.rebuild()
                        if all_done > 0:
                            all_done = 0
            for coord in self.coorddict.values():
                if not coord.rebuilded:
                    if coord.check_to_rebuild():
                        coord.rebuild()
                        if all_done > 0:
                            all_done = 0
    print time.ctime() + ' finished - rebuilding grids and coordsys'
    #########################################################
    for prop in self.propdict.values():
        prop.rebuild()
    print time.ctime() + ' finished - rebuilding properties'
    print time.ctime() + ' started - rebuilding elements'
    for elem in self.elemdict.values():
        elem.rebuild()
    print time.ctime() + ' finished - rebuilding elements'
    print time.ctime() + ' started - rebuilding subcases'
    for sub in self.subcases.values():
        sub.rebuild()
    print time.ctime() + ' finished - rebuilding subcases'
    print time.ctime() + ' started - rebuilding load entities'
    for load in self.loaddict.values():
        load.rebuild()
        # loads also register themselves on the grids they act on
        load.add2grid()
    print time.ctime() + ' finished - rebuilding load entities'
    print time.ctime() + ' started - rebuilding constraints'
    for cons in self.consdict.values():
        cons.add2grid(self)
    print time.ctime() + ' finished - rebuilding constraints'
| {
"content_hash": "1b3af16f925c609259d167b30ebca64d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 68,
"avg_line_length": 40.12,
"alnum_prop": 0.5204386839481555,
"repo_name": "saullocastro/mapy",
"id": "f810c4ca4bbb2b35ade323f1aca74ee2ed9fcf09",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapy/model/rebuild.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "176262"
}
],
"symlink_target": ""
} |
import collections
import tables
import os
import ShareYourSystem as SYS
import sys
#</ImportSpecificModules>
#<DefineLocals>
# Module-level separator; only referenced by the commented-out join-string
# build inside JoinerClass -- TODO confirm it is still needed.
JoiningStr='_'
#</DefineLocals>
#<DefineClass>
class JoinerClass(SYS.TablerClass):
	# Draft Joiner: a Tabler whose children "join" gathered values up into
	# their parent's JoinedVariablesList (much of the write logic is still
	# commented out below -- this is work-in-progress code).
	#<DefineHookMethods>
	def initAfter(self):
		#<DefineSpecificDo>
		self.JoinedVariablesList=[]
		#</DefineSpecificDo>
	def tabularAfter(self,**_TabularingVariablesDict):
		#Maybe it is joined
		if 'JoinedOrderedDict' in self.ModeledDict:
			self.join(self.ModeledDict['JoiningStr'])
			'''
			self.TabularedRowedTuplesList+=zip(
				map(
						lambda __JoiningChildObjects:
						'ModeledJoinList'+getattr(
								__JoiningChildObjects,
								DoneJoiningKeyStrKeyStr
							),
						self.ModeledDict['JoinedOrderedDict'].values()
					),
				self.JoinedModeledFeaturedIntsTuplesList
				)+[('ModeledJoinList',self.ModeledJoinList)]
			'''
		pass
	#</DefineHookMethods>
	def join(self,_JoiningStr,_GatheringVariablesList,**_LocalJoiningVariablesDict):
		# Depth-first recursion over the nodified children; a node joins only
		# when it has no children or is re-entered with IsJoiningBool=True.
		#debug
		print('join method')
		print('_JoiningStr is ',_JoiningStr)
		if _LocalJoiningVariablesDict=={}:
			# First (top-level) call: initialise the flag and nodify.
			_LocalJoiningVariablesDict['IsJoiningBool']=False
			#Nodify
			self.nodify(_JoiningStr)
			#debug
			print('join',self.NodifiedKeyStr)
			print('For the deeper child by the way it is already joined')
			print('self.JoinedVariablesList',self.JoinedVariablesList)
			print('')
		if len(self.NodifiedOrderedDict)==0 or _LocalJoiningVariablesDict['IsJoiningBool']:
			'''
			print('join',self.NodifiedKeyStr)
			print('We have the right to join !')
			print('len(JoiningOrderedDict)',len(JoiningOrderedDict))
			print("_LocalJoiningVariablesDict['IsJoiningBool']",
				_LocalJoiningVariablesDict['IsJoiningBool'])
			print('')
			'''
			ParentPointer=getattr(self,self.NodifiedStr+'ParentPointer')
			if ParentPointer!=None:
				'''
				print('join',self.NodifiedKeyStr)
				print('ParentPointer exists')
				print('')
				'''
				Int=getattr(self,self.NodifiedStr+"Int")
				if Int==0:
					'''
					print('join',self.NodifiedKeyStr)
					print('This is the first child so init
					the ParentPointer.JoinedVariablesList')
					print('')
					'''
					# First child pre-sizes the parent's slot list.
					ParentPointer.JoinedVariablesList=[""]*len(
						getattr(ParentPointer,self.NodifiedStr+"OrderedDict"))
				'''
				#Get the GatheredVariablesList
				GatheredVariablesList=self.gather(_GatheringVariablesList)
				print('join',self.NodifiedKeyStr)
				print('append in the ParentPointer.JoinedVariablesList[Int]')
				print('Int is ',str(Int))
				print('GatheredVariablesList is ',GatheredVariablesList)
				#set in the parent pointer
				ParentPointer.JoinedVariablesList[Int]=JoiningStr.join(
					map(
						lambda __JoiningTuple:
						'('+str(__JoiningTuple[0])+','+str(__JoiningTuple[1])+')',
						GatheredVariablesList
					)
				)
				'''
			else:
				print('join',self.NodifiedKeyStr)
				print('ParentPointer is None')
		else:
			print('join',self.NodifiedKeyStr)
			print('This either a not last level of child or it is not yet authorized to join')
			print('len(self.NodifiedOrderedDict.values()) is ',len(self.NodifiedOrderedDict.values()))
			print("_LocalJoiningVariablesDict['IsJoiningBool']",_LocalJoiningVariablesDict['IsJoiningBool'])
			print('so join the deeper children groups first')
			print('')
			# NOTE(review): statement-level map() -- under Python 3 this would
			# be lazy and never call the children's join; confirm this module
			# is meant to run on Python 2.
			map(
				lambda __JoiningObjects:
				__JoiningObjects.join(
					_JoiningStr,
					_GatheringVariablesList,
					**_LocalJoiningVariablesDict
				),
				self.NodifiedOrderedDict.values()
			)
			'''
			print('join',self.NodifiedKeyStr)
			print('The deeper children groups are joined now')
			print('So join here !')
			print('')
			'''
			# Re-enter with the join authorization flag set.
			self.join(
				_JoiningStr,
				_GatheringVariablesList,
				**dict(
					_LocalJoiningVariablesDict,**{'IsJoiningBool':True}
				)
			)
		'''
		print("join END for ",self.NodifiedKeyStr)
		print('self.JoinedRowIntsTuplesList',self.JoinedRowIntsTuplesList)
		print('')
		'''
#</DefineClass>
#<DefineAttestingFunctions>
def attest_join():
	'''
	#Build a grouped structure
	Joiner=SYS.JoinerClass().update(
		[
			(
				'App_Structure_ChildJoiner1',
				SYS.JoinerClass().update(
				[
					(
						'App_Structure_GrandChildJoiner1',
						SYS.JoinerClass()
					)
				])
			),
			(
				'App_Structure_ChildJoiner2',
				SYS.JoinerClass().update(
					[]
				)
			)
		]
	).walk(
			[
				'StructuredOrderedDict.items()'
			],
			**{
					'BeforeUpdateList':
					[
						('parentize',{'ArgsVariable':"Structure"}),
						('join',{'ArgsVariable':[
												"Structure",
												[
													["StructuredKeyStr"]
												]
											]
								}
						)
					]
				}
		).hdfclose()
	'''
	#Build Hdf groups
	Tabler=SYS.TablerClass().hdformat().update(
		[
			(
				'App_Structure_ChildGrouper1',
				SYS.GrouperClass().update(
				[
					(
						'App_Structure_GrandChildTabler1',
						SYS.TablerClass()
					)
				])
			)
		]
	).walk(
		[
			'StructuredOrderedDict.items()'
		],
		**{
			'BeforeUpdateList':
			[
				('parentize',{'ArgsVariable':"Structure"}),
				('group',{'ArgsVariable':"Structure"})
			]
		}
	).update(
		map(
			lambda __Tuple:
			(
				'/App_Structure_ChildGrouper1/App_Structure_GrandChildTabler1/'+__Tuple[0],
				__Tuple[1]
			),
			[
				('MyInt',0),
				('MyStr',"hello"),
				('UnitsInt',3),
				('MyIntsList',[2,4,1]),
				('MyFloat',0.1),
				('MyFloatsList',[2.3,4.5,1.1]),
				('App_Scan_ChildModeler',SYS.ModelerClass()),
				('App_Model_FeaturingDict',
					{
						'ColumningTuplesList':
						[
							('MyInt',tables.Int64Col()),
							('MyStr',tables.StrCol(10)),
							('MyIntsList',(tables.Int64Col,'UnitsInt'))
						]
					}
				),
				('App_Model_OutputingDict',
					{
						'ColumningTuplesList':
						[
							('MyFloat',tables.Float32Col()),
							('MyFloatsList',(tables.Float32Col,'UnitsInt'))
						]
					}
				),
				('model',{'ArgsVariable':"Feature"}),
				('tabular',{'ArgsVariable':"Feature"}),
				('insert',{}),
				('UnitsInt',2),
				('MyIntsList',[2,4]),
				('MyFloatsList',[2.3,4.5]),
				('tabular',{'ArgsVariable':"Feature"}),
				('insert',{}),
				('model',{
					'ArgsVariable':"Output",
					'KwargsDict':{'JoiningStr':"Scan"}
				}),
				('tabular',{
					'ArgsVariable':"Output",
				}),
				('insert',{})
			]
		)
	).hdfclose()
	#Return the object itself
	# Bug fix: the function previously printed and returned ``Joiner``, a name
	# defined only inside the commented-out draft docstring above, which would
	# raise NameError at runtime; the object actually built here is ``Tabler``.
	print(Tabler)
	return Tabler
#</DefineAttestingFunctions>
| {
"content_hash": "c53973da791f8d8707916a47209a990a",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 99,
"avg_line_length": 24.861016949152543,
"alnum_prop": 0.5389964548677393,
"repo_name": "Ledoux/ShareYourSystem",
"id": "42c0a63f65ebcc2ca140ee0db12bd99000b1a3bf",
"size": "7359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Modelers/Joiner/draft/Joiner copy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
import unittest
import mock
from cinder.tests.unit.volume.drivers.dell_emc.unity \
import fake_exception as ex
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc.unity import driver
########################
#
# Start of Mocks
#
########################
class MockAdapter(object):
    """Test double for the Unity adapter.

    ``do_setup`` always fails so error paths can be exercised; every other
    operation simply echoes or tags its arguments so the driver tests can
    verify pass-through behavior without touching a real array.
    """

    def do_setup(self, driver_object, configuration):
        # Deliberately fail to simulate a broken adapter setup.
        raise ex.AdapterSetupError()

    @staticmethod
    def create_volume(volume):
        return volume

    @staticmethod
    def create_volume_from_snapshot(volume, snapshot):
        return volume

    @staticmethod
    def create_cloned_volume(volume, src_vref):
        return volume

    @staticmethod
    def extend_volume(volume, new_size):
        volume.size = new_size

    @staticmethod
    def delete_volume(volume):
        volume.exists = False

    @staticmethod
    def create_snapshot(snapshot):
        snapshot.exists = True
        return snapshot

    @staticmethod
    def delete_snapshot(snapshot):
        snapshot.exists = False

    @staticmethod
    def initialize_connection(volume, connector):
        return dict(volume=volume, connector=connector)

    @staticmethod
    def terminate_connection(volume, connector):
        return dict(volume=volume, connector=connector)

    @staticmethod
    def update_volume_stats():
        return dict(stats=123)

    @staticmethod
    def manage_existing(volume, existing_ref):
        volume.managed = True
        return volume

    @staticmethod
    def manage_existing_get_size(volume, existing_ref):
        volume.managed = True
        volume.size = 7
        return volume

    @staticmethod
    def get_pool_name(volume):
        return 'pool_0'

    @staticmethod
    def initialize_connection_snapshot(snapshot, connector):
        return dict(snapshot=snapshot, connector=connector)

    @staticmethod
    def terminate_connection_snapshot(snapshot, connector):
        return dict(snapshot=snapshot, connector=connector)
########################
#
# Start of Tests
#
########################
class UnityDriverTest(unittest.TestCase):
    """Unit tests for UnityDriver; MockAdapter echoes calls so each test can
    assert that the driver passes its arguments straight through."""
    @staticmethod
    def get_volume():
        # provider_location carries the "<id>^<lun name>" encoding.
        return mock.Mock(provider_location='id^lun_43', id='id_43')
    @classmethod
    def get_snapshot(cls):
        return mock.Mock(volume=cls.get_volume())
    @staticmethod
    def get_context():
        return None
    @staticmethod
    def get_connector():
        return {'host': 'host1'}
    def setUp(self):
        self.config = conf.Configuration(None)
        self.driver = driver.UnityDriver(configuration=self.config)
        # Swap in the mock so no array is ever contacted.
        self.driver.adapter = MockAdapter()
    def test_default_initialize(self):
        # Default configuration values and the default (iSCSI) protocol.
        config = conf.Configuration(None)
        iscsi_driver = driver.UnityDriver(configuration=config)
        self.assertIsNone(config.unity_storage_pool_names)
        self.assertTrue(config.san_thin_provision)
        self.assertEqual('', config.san_ip)
        self.assertEqual('admin', config.san_login)
        self.assertEqual('', config.san_password)
        self.assertEqual('', config.san_private_key)
        self.assertEqual('', config.san_clustername)
        self.assertEqual(22, config.san_ssh_port)
        self.assertEqual(False, config.san_is_local)
        self.assertEqual(30, config.ssh_conn_timeout)
        self.assertEqual(1, config.ssh_min_pool_conn)
        self.assertEqual(5, config.ssh_max_pool_conn)
        self.assertEqual('iSCSI', iscsi_driver.protocol)
    def test_fc_initialize(self):
        # storage_protocol='fc' flips the driver protocol to FC.
        config = conf.Configuration(None)
        config.storage_protocol = 'fc'
        fc_driver = driver.UnityDriver(configuration=config)
        self.assertEqual('FC', fc_driver.protocol)
    def test_do_setup(self):
        # MockAdapter.do_setup always raises AdapterSetupError.
        def f():
            self.driver.do_setup(None)
        self.assertRaises(ex.AdapterSetupError, f)
    def test_create_volume(self):
        volume = self.get_volume()
        self.assertEqual(volume, self.driver.create_volume(volume))
    def test_create_volume_from_snapshot(self):
        volume = self.get_volume()
        snap = self.get_snapshot()
        self.assertEqual(
            volume, self.driver.create_volume_from_snapshot(volume, snap))
    def test_create_cloned_volume(self):
        volume = self.get_volume()
        self.assertEqual(
            volume, self.driver.create_cloned_volume(volume, None))
    def test_extend_volume(self):
        volume = self.get_volume()
        self.driver.extend_volume(volume, 6)
        self.assertEqual(6, volume.size)
    def test_delete_volume(self):
        volume = self.get_volume()
        self.driver.delete_volume(volume)
        self.assertFalse(volume.exists)
    def test_create_snapshot(self):
        snapshot = self.get_snapshot()
        self.driver.create_snapshot(snapshot)
        self.assertTrue(snapshot.exists)
    def test_delete_snapshot(self):
        snapshot = self.get_snapshot()
        self.driver.delete_snapshot(snapshot)
        self.assertFalse(snapshot.exists)
    # The export-related driver entry points are no-ops; they must return None.
    def test_ensure_export(self):
        self.assertIsNone(self.driver.ensure_export(
            self.get_context(), self.get_volume()))
    def test_create_export(self):
        self.assertIsNone(self.driver.create_export(
            self.get_context(), self.get_volume(), self.get_connector()))
    def test_remove_export(self):
        self.assertIsNone(self.driver.remove_export(
            self.get_context(), self.get_volume()))
    def test_check_for_export(self):
        self.assertIsNone(self.driver.check_for_export(
            self.get_context(), self.get_volume()))
    def test_initialize_connection(self):
        volume = self.get_volume()
        connector = self.get_connector()
        conn_info = self.driver.initialize_connection(volume, connector)
        self.assertEqual(volume, conn_info['volume'])
        self.assertEqual(connector, conn_info['connector'])
    def test_terminate_connection(self):
        volume = self.get_volume()
        connector = self.get_connector()
        conn_info = self.driver.terminate_connection(volume, connector)
        self.assertEqual(volume, conn_info['volume'])
        self.assertEqual(connector, conn_info['connector'])
    def test_update_volume_stats(self):
        # The driver augments adapter stats with version/vendor fields.
        stats = self.driver.get_volume_stats(True)
        self.assertEqual(123, stats['stats'])
        self.assertEqual(self.driver.VERSION, stats['driver_version'])
        self.assertEqual(self.driver.VENDOR, stats['vendor_name'])
    def test_manage_existing(self):
        volume = self.driver.manage_existing(self.get_volume(), None)
        self.assertTrue(volume.managed)
    def test_manage_existing_get_size(self):
        volume = self.driver.manage_existing_get_size(self.get_volume(), None)
        self.assertTrue(volume.managed)
        self.assertEqual(7, volume.size)
    def test_get_pool(self):
        self.assertEqual('pool_0', self.driver.get_pool(self.get_volume()))
    def test_unmanage(self):
        ret = self.driver.unmanage(None)
        self.assertIsNone(ret)
    def test_backup_use_temp_snapshot(self):
        self.assertTrue(self.driver.backup_use_temp_snapshot())
    def test_create_export_snapshot(self):
        snapshot = self.driver.create_export_snapshot(self.get_context(),
                                                      self.get_snapshot(),
                                                      self.get_connector())
        self.assertTrue(snapshot.exists)
    def test_remove_export_snapshot(self):
        snapshot = self.get_snapshot()
        self.driver.remove_export_snapshot(self.get_context(), snapshot)
        self.assertFalse(snapshot.exists)
    def test_initialize_connection_snapshot(self):
        snapshot = self.get_snapshot()
        conn_info = self.driver.initialize_connection_snapshot(
            snapshot, self.get_connector())
        self.assertEqual(snapshot, conn_info['snapshot'])
    def test_terminate_connection_snapshot(self):
        snapshot = self.get_snapshot()
        conn_info = self.driver.terminate_connection_snapshot(
            snapshot, self.get_connector())
        self.assertEqual(snapshot, conn_info['snapshot'])
| {
"content_hash": "74cc8746f046306446c6e971e162990d",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 78,
"avg_line_length": 32.13779527559055,
"alnum_prop": 0.6415533504838907,
"repo_name": "ge0rgi/cinder",
"id": "b1ce7721bd1da56df6323193cb8419b1ff3abb26",
"size": "8786",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration: adds Album.published (public/shared/private)
    # and switches date_modified on Album and Photo to auto_now.
    dependencies = [
        ('imager_images', '0010_auto_20150728_1429'),
    ]
    operations = [
        migrations.AddField(
            model_name='album',
            name='published',
            field=models.CharField(default='private', max_length=255, choices=[('public', 'public'), ('shared', 'shared'), ('private', 'private')]),
        ),
        migrations.AlterField(
            model_name='album',
            name='date_modified',
            # One-off datetime default backfills existing rows; not preserved.
            field=models.DateTimeField(default=datetime.datetime(2015, 7, 28, 22, 15, 35, 492355, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='photo',
            name='date_modified',
            field=models.DateTimeField(default=datetime.datetime(2015, 7, 28, 22, 15, 42, 61686, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='photo',
            name='published',
            field=models.CharField(default='private', max_length=255, choices=[('public', 'public'), ('shared', 'shared'), ('private', 'private')]),
        ),
    ]
| {
"content_hash": "08188c5ea7d67fae941c49ae8f7fb863",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 148,
"avg_line_length": 35.945945945945944,
"alnum_prop": 0.5887218045112782,
"repo_name": "gatita/django-imager",
"id": "ab4f2610a2105023114f4437f85c1f3681c0ca8e",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/migrations/0011_auto_20150728_1515.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10537"
},
{
"name": "HTML",
"bytes": "27190"
},
{
"name": "JavaScript",
"bytes": "54533"
},
{
"name": "Python",
"bytes": "58895"
}
],
"symlink_target": ""
} |
"""Testing binary Heap."""
import pytest
@pytest.fixture
def empty_heap():
    """Provide a freshly constructed, empty Binheap."""
    from src.binheap import Binheap
    return Binheap()
@pytest.fixture
def heap():
    """Provide a Binheap pre-populated from a fixed iterable of ints."""
    from src.binheap import Binheap
    return Binheap([10, 4, 2, 6, 13, 72, 1, 49])
def test_push_val_to_head(empty_heap):
    """Pushing onto an empty heap places the value in the head slot."""
    empty_heap.push(3)
    expected = [None, 3]
    assert empty_heap.container == expected
def test_push_val(empty_heap):
    """A second, smaller value pushed after the head lands below it."""
    for value in (3, 2):
        empty_heap.push(value)
    assert empty_heap.container == [None, 3, 2]
def test_push_val_large(empty_heap):
    """A larger value pushed last bubbles up to the head."""
    for value in (3, 2, 1, 16):
        empty_heap.push(value)
    assert empty_heap.container == [None, 16, 3, 1, 2]
def test_push_on_empty(empty_heap):
    """Pushing onto an empty heap succeeds and stores the value."""
    empty_heap.push(1)
    expected = [None, 1]
    assert empty_heap.container == expected
def test_initialize_iterable(heap):
    """Construction from an iterable heapifies the input values."""
    expected = [None, 72, 49, 13, 10, 6, 2, 1, 4]
    assert heap.container == expected
def test_display(heap):
    """display() renders the heap as a level-order, indented string."""
    expected = ' 72 \n 49 13 \n 10 6 2 1 \n4 \n'
    assert heap.display() == expected
def test_pop(heap):
    """pop() removes the head and restores the heap property."""
    heap.pop()
    expected = [None, 49, 13, 10, 6, 2, 1, 4]
    assert heap.container == expected
def test_push_pop(heap):
    """A push followed by a pop leaves the pushed value in the tree."""
    heap.push(5)
    heap.pop()
    expected = [None, 49, 13, 10, 6, 2, 1, 4, 5]
    assert heap.container == expected
def test_pop_empty(empty_heap):
    """Popping an empty heap raises IndexError."""
    with pytest.raises(IndexError):
        empty_heap.pop()
| {
"content_hash": "8ecbe0f7d112a0ca79230dab7a3cf4c6",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 63,
"avg_line_length": 22.883116883116884,
"alnum_prop": 0.6089670828603859,
"repo_name": "clair3st/Data-Structures",
"id": "55ff62c27d8284db7421e54866688b886a95996d",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_binheap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59644"
}
],
"symlink_target": ""
} |
"""
MI logging can be configured using a combination of two of four files.
there is first a "base" configuration, and then a "local" set of overrides.
the base configuration is from the file specified in the environment variable MI_LOGGING_CONFIG
or res/config/mi-logging.yml (ie, users can set MI-specific configuration for drivers run from pycc container)
or config/logging.yml from within the MI egg (default to use if no mi-logging.yml was created)
then the local override may be res/config/mi-logging.local.yml (for overrides specific to MI),
or if this is not found, then res/config/logging.local.yml,
or if this is not found then no overrides.
The get_logger function is obsolete but kept to simplify transition to the ooi.logging code.
USAGE:
to configure logging from the standard MI configuration files:
from mi.core.log import LoggerManager
LoggerManager()
to create a logger automatically scoped with the calling package and ready to use:
from ooi.logging import log # no longer need get_logger at all
"""
import inspect
import logging
import os
import sys
import yaml
import pkg_resources
from types import FunctionType
from functools import wraps
from mi.core.common import Singleton
from ooi.logging import config, log
LOGGING_CONFIG_ENVIRONMENT_VARIABLE="MI_LOGGING_CONFIG"
LOGGING_PRIMARY_FROM_FILE='res/config/mi-logging.yml'
LOGGING_PRIMARY_FROM_EGG='mi-logging.yml'
LOGGING_MI_OVERRIDE='res/config/mi-logging.local.yml'
LOGGING_CONTAINER_OVERRIDE='res/config/logging.local.yml'
"""Basic pyon logging (with or without container)
NOTE: the functionality of this module has moved to ooi.logging.config.
currently this module is maintained for API compatibility, but is implemented using the new package.
"""
import logging
from ooi.logging import config
DEFAULT_LOGGING_PATHS = ['res/config/logging.yml', 'res/config/logging.local.yml']
logging_was_configured = False
def configure_logging(logging_conf_paths, logging_config_override=None):
"""
Public call to configure and initialize logging.
@param logging_conf_paths List of paths to logging config YML files (in read order)
@param config_override Dict with config entries overriding files read
"""
global logging_was_configured
logging_was_configured = True
for path in logging_conf_paths:
try:
config.add_configuration(path)
except Exception, e:
print 'WARNING: could not load logging configuration file %s: %s' % (path, e)
if logging_config_override:
try:
config.add_configuration(logging_config_override)
except Exception,e:
print 'WARNING: failed to apply logging override %r: %e' % (logging_config_override,e)
# direct warnings mechanism to loggers
logging.captureWarnings(True)
def is_logging_configured():
    """Tell the caller whether logging was already set up in this container,
    either via configure_logging() or by a pre-existing ooi.logging config."""
    global logging_was_configured
    if logging_was_configured:
        return logging_was_configured
    return config.get_configuration()
class LoggerManager(Singleton):
    """
    Logger Manager. Provides an interface to configure logging at runtime.

    Primary configuration resolution order (first hit wins):
      1. the file named by the MI_LOGGING_CONFIG environment variable
      2. res/config/mi-logging.yml
      3. mi-logging.yml bundled inside the 'mi' egg
    followed by optional overrides from res/config/mi-logging.local.yml or,
    failing that, res/config/logging.local.yml.
    """
    def init(self, debug=False):
        """Initialize logging for MI. Because this is a singleton it will only be initialized once."""
        path = os.environ[LOGGING_CONFIG_ENVIRONMENT_VARIABLE] if LOGGING_CONFIG_ENVIRONMENT_VARIABLE in os.environ else None
        haveenv = path and os.path.isfile(path)
        if path and not haveenv:
            # BUG FIX: this used 'print >> os.stderr', but the os module has
            # no stderr attribute -- it raised AttributeError instead of
            # emitting the warning. sys.stderr is the correct stream.
            print >> sys.stderr, 'WARNING: %s was set but %s was not found (using default configuration files instead)' % (LOGGING_CONFIG_ENVIRONMENT_VARIABLE, path)
        if path and haveenv:
            config.replace_configuration(path)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + path
        elif os.path.isfile(LOGGING_PRIMARY_FROM_FILE):
            config.replace_configuration(LOGGING_PRIMARY_FROM_FILE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + LOGGING_PRIMARY_FROM_FILE
        else:
            # fall back to the default configuration bundled inside the egg
            logconfig = pkg_resources.resource_string('mi', LOGGING_PRIMARY_FROM_EGG)
            parsed = yaml.load(logconfig)
            config.replace_configuration(parsed)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from config/' + LOGGING_PRIMARY_FROM_FILE
        # local overrides are additive on top of the primary configuration
        if os.path.isfile(LOGGING_MI_OVERRIDE):
            config.add_configuration(LOGGING_MI_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_MI_OVERRIDE
        elif os.path.isfile(LOGGING_CONTAINER_OVERRIDE):
            config.add_configuration(LOGGING_CONTAINER_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_CONTAINER_OVERRIDE
class LoggingMetaClass(type):
    """Metaclass that wraps every plain function attribute of a class with
    entry/exit logging at the level named by ``_log_level``.

    Names already recorded in the class-dict set ``__wrapped`` are skipped,
    so applying the metaclass again (e.g. through subclassing) does not
    double-wrap methods.
    """
    _log_level = 'trace'
    def __new__(mcs, class_name, bases, class_dict):
        marker = '__wrapped'
        decorate = log_method(class_name=class_name, log_level=mcs._log_level)
        already_wrapped = class_dict.get(marker, set())
        updated = {}
        for name, value in class_dict.items():
            if type(value) == FunctionType and name not in already_wrapped:
                value = decorate(value)
                already_wrapped.add(name)
            updated[name] = value
        updated[marker] = already_wrapped
        return type.__new__(mcs, class_name, bases, updated)
# Convenience metaclasses that wrap methods at progressively higher log levels.
class DebugLoggingMetaClass(LoggingMetaClass):
    # entry/exit logging at debug level
    _log_level = 'debug'
class InfoLoggingMetaClass(DebugLoggingMetaClass):
    # entry/exit logging at info level
    _log_level = 'info'
class WarnLoggingMetaClass(InfoLoggingMetaClass):
    # entry/exit logging at warn level
    _log_level = 'warn'
class ErrorLoggingMetaClass(WarnLoggingMetaClass):
    # entry/exit logging at error level
    _log_level = 'error'
def get_logging_metaclass(log_level='trace'):
    """Return the metaclass that logs method entry/exit at *log_level*.

    Unknown level names fall back to the trace-level LoggingMetaClass.
    """
    return {
        'trace': LoggingMetaClass,
        'debug': DebugLoggingMetaClass,
        'info': InfoLoggingMetaClass,
        'warn': WarnLoggingMetaClass,
        'error': ErrorLoggingMetaClass,
    }.get(log_level, LoggingMetaClass)
def log_method(class_name=None, log_level='trace'):
    """Build a decorator that logs entry (args/kwargs) and exit (return
    value) of the wrapped function at *log_level*.

    The logger is named after the first module on the call stack outside
    mi.core.log, so records appear to come from the caller's module.
    """
    name = "UNKNOWN_MODULE_NAME"
    for frame_info in inspect.stack():
        module = inspect.getmodule(frame_info[0])
        if module:
            name = module.__name__
            if name != 'mi.core.log':
                break
    logger = logging.getLogger(name)
    def wrapper(func):
        func_name = func.__name__ if class_name is None else '%s.%s' % (class_name, func.__name__)
        @wraps(func)
        def inner(*args, **kwargs):
            emit = getattr(logger, log_level)
            emit('entered %s | args: %r | kwargs: %r', func_name, args, kwargs)
            result = func(*args, **kwargs)
            emit('exiting %s | returning %r', func_name, result)
            return result
        return inner
    return wrapper
def get_logger():
    """Obsolete shim kept for API compatibility: return the shared ooi.logging log object."""
    return log
manager = LoggerManager()
| {
"content_hash": "a05be3897da96dd59594c57867e46065",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 164,
"avg_line_length": 35.650485436893206,
"alnum_prop": 0.6744281045751634,
"repo_name": "tapanagupta/mi-instrument",
"id": "ac8c8c7160ade27fe6dc86dcc4e16cf0f3697062",
"size": "7344",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/core/log.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6417660"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
from mopidy.internal import path
from mopidy.models import Track
from mopidy.stream import actor
from tests import path_to_data_dir
@pytest.fixture
def config():
    """Minimal extension config: stream backend over file:// only, file backend off."""
    stream_settings = {
        "timeout": 1000,
        "metadata_blacklist": [],
        "protocols": ["file"],
    }
    return {
        "proxy": {},
        "stream": stream_settings,
        "file": {"enabled": False},
    }
@pytest.fixture
def audio():
    """A Mock standing in for the Mopidy audio actor."""
    return mock.Mock()
@pytest.fixture
def track_uri():
    """file:// URI pointing at the bundled song1.wav test fixture."""
    wav_path = path_to_data_dir("song1.wav")
    return path.path_to_uri(wav_path)
def test_lookup_ignores_unknown_scheme(audio, config):
    """Lookup of a scheme not listed in `protocols` yields no tracks."""
    library = actor.StreamBackend(audio=audio, config=config).library
    assert library.lookup("http://example.com") == []
def test_lookup_respects_blacklist(audio, config, track_uri):
    """A blacklisted URI comes back as a bare Track with no metadata scan."""
    config["stream"]["metadata_blacklist"].append(track_uri)
    library = actor.StreamBackend(audio=audio, config=config).library
    assert library.lookup(track_uri) == [Track(uri=track_uri)]
def test_lookup_respects_blacklist_globbing(audio, config, track_uri):
    """Blacklist entries may be glob patterns covering whole directories."""
    data_dir_glob = path.path_to_uri(path_to_data_dir("")) + "*"
    config["stream"]["metadata_blacklist"].append(data_dir_glob)
    library = actor.StreamBackend(audio=audio, config=config).library
    assert library.lookup(track_uri) == [Track(uri=track_uri)]
def test_lookup_converts_uri_metadata_to_track(audio, config, track_uri):
    """Lookup scans the file and fills in track metadata (length in ms)."""
    library = actor.StreamBackend(audio=audio, config=config).library
    tracks = library.lookup(track_uri)
    assert len(tracks) == 1
    (track,) = tracks
    assert track.uri == track_uri
    assert track.length == 4406
| {
"content_hash": "7e35a5892d23ce916dc7ef95724f86d9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 26.126984126984127,
"alnum_prop": 0.6743620899149453,
"repo_name": "jodal/mopidy",
"id": "24af43506bcc5d907b3aca236a7dfbbe3766d7ae",
"size": "1646",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tests/stream/test_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "Python",
"bytes": "1231080"
},
{
"name": "Roff",
"bytes": "573"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
from . import *
import datetime
__author__ = 'Preetam'
# <editor-fold desc="MongoDB models">
# Class AuditModel - Should be embedded in all DynamicDocument and Document models of the project
class AuditModel(EmbeddedDocument):
    """Embedded audit trail: who created/updated a document and when.

    Embed in every Document/DynamicDocument so all collections share the
    same audit fields.
    """
    # Both timestamps default to the moment the document is created;
    # presumably callers refresh updated_date on save -- verify at call sites.
    created_date = DateTimeField(default=datetime.datetime.now, required=False)
    updated_date = DateTimeField(default=datetime.datetime.now, required=False)
    # TODO Use reference fields in comments below and make the fields required
    # '-1' acts as a sentinel for "unknown user" until ReferenceField(User)
    # (the commented alternatives below) is adopted.
    created_by = StringField(required=False, default='-1')
    #ReferenceField(User, default="None", required=False)
    updated_by = StringField(required=False, default='-1')
    #ReferenceField(User, default="None", required=False)
# </editor-fold>
# <editor-fold desc="Swagger models">
# Swagger input model
@swagger.model
class AuditInputModel:
    """Swagger schema describing audit fields accepted on input."""
    resource_fields = {
        'created_date': fields.DateTime,
        'updated_date': fields.DateTime,
        'created_by': fields.String,
        'updated_by': fields.String
    }
    #required = ['created_date']
# Swagger output model
@swagger.model
class AuditOutputModel:
    """Swagger schema describing audit fields returned in responses.

    NOTE(review): identical to AuditInputModel today; kept separate so the
    input and output schemas can diverge independently.
    """
    resource_fields = {
        'created_date': fields.DateTime,
        'updated_date': fields.DateTime,
        'created_by': fields.String,
        'updated_by': fields.String
    }
# </editor-fold>
| {
"content_hash": "0d89b8c70e2406a4cb8385d1421792cc",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 97,
"avg_line_length": 27.851063829787233,
"alnum_prop": 0.692131398013751,
"repo_name": "usc-isi-i2/dig-crowd-sourcing",
"id": "a78451e40b823e4a423d02bde009250850fff0bd",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adnota/adnota/api/v1_0/models/audit_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29616"
},
{
"name": "HTML",
"bytes": "206259"
},
{
"name": "JavaScript",
"bytes": "171871"
},
{
"name": "Python",
"bytes": "133716"
},
{
"name": "TypeScript",
"bytes": "125875"
}
],
"symlink_target": ""
} |
"""Utilities to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import itertools
import os
import warnings
import numpy as np
import six
from tensorflow.contrib.tensorrt.python import trt_convert
# pylint: disable=unused-import
from tensorflow.contrib.tensorrt.python.ops import trt_engine_op
# pylint: enable=unused-import
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Per-test-class parameters: the GraphDef under test plus its I/O names,
# input shapes, and expected output shapes.
TfTrtIntegrationTestParams = namedtuple("TfTrtIntegrationTestParams", [
    "gdef", "input_names", "input_dims", "output_names", "expected_output_dims"
])
# How a single generated test case performs the conversion.
RunParams = namedtuple(
    "RunParams",
    ["use_optimizer", "precision_mode", "dynamic_engine", "test_name"])
# Knobs forwarded to trt_convert.create_inference_graph().
ConversionParams = namedtuple("ConversionParams", [
    "max_batch_size", "max_workspace_size_bytes", "precision_mode",
    "minimum_segment_size", "is_dynamic_op", "maximum_cached_engines",
    "cached_engine_batch_sizes", "rewriter_config"
])
# Precision modes exercised by the generated tests.
PRECISION_MODES = ["FP32", "FP16", "INT8"]
def IsQuantizationMode(mode):
  """Return True iff `mode` is the INT8 (quantized) precision mode."""
  return mode in ("INT8",)
class GraphState(object):
  """Enum-like constants naming the phase a graph is in during a test."""
  ORIGINAL = 0  # unconverted input graph
  CALIBRATE = 1  # converted graph containing INT8 calibration engines
  INFERENCE = 2  # final converted graph used for inference
class TfTrtIntegrationTestBase(test_util.TensorFlowTestCase):
"""Class to test Tensorflow-TensorRT integration."""
@property
def trt_incompatible_op(self):
return math_ops.sin
@property
def precision_modes(self):
return ["FP32", "FP16", "INT8"]
# str is bytes in py2, but unicode in py3.
def _ToUnicode(self, s):
if six.PY2:
if isinstance(s, unicode):
return s
return s.decode("utf-8")
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
def _ToBytes(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s.encode("utf-8")
return s
def _ToString(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
@classmethod
def setUpClass(cls):
"""Setup method for the module."""
super(TfTrtIntegrationTestBase, cls).setUpClass()
trt_convert.enable_test_value()
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TfTrtIntegrationTestBase, self).__init__(methodName)
self._trt_test_params = None
def setUp(self):
"""Setup method."""
super(TfTrtIntegrationTestBase, self).setUp()
warnings.simplefilter("always")
trt_convert.clear_test_values("")
def GetParams(self):
"""Return a TfTrtIntegrationTestParams for test, implemented by subclass."""
raise NotImplementedError()
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
return ConversionParams(
max_batch_size=max([
dims[0] for dims in self._GetParamsCached().input_dims if len(dims)
]),
max_workspace_size_bytes=1 << 25,
precision_mode=run_params.precision_mode,
minimum_segment_size=2,
is_dynamic_op=run_params.dynamic_engine,
maximum_cached_engines=1,
cached_engine_batch_sizes=None,
rewriter_config=None)
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return True
def VerifyRunForEngine(self, engine_name, graph_state, expect_run=True):
"""Verify the state of a particular engine after sess.run()."""
if graph_state == GraphState.ORIGINAL:
self._ExpectCalibration(engine_name, "")
self._ExpectNativeSegment(engine_name, "")
self._ExpectTrtEngine(engine_name, "")
elif graph_state == GraphState.CALIBRATE:
self._ExpectCalibration(engine_name, "done")
self._ExpectNativeSegment(engine_name, "done")
self._ExpectTrtEngine(engine_name, "")
elif graph_state == GraphState.INFERENCE:
self._ExpectCalibration(engine_name, "")
if expect_run:
self._ExpectNativeSegment(engine_name, "")
self._ExpectTrtEngine(engine_name, "done")
else:
self._ExpectNativeSegment(engine_name, "done")
self._ExpectTrtEngine(engine_name, "")
def VerifyRun(self, run_params, graph_state):
"""Verify the state of all engines after sess.run()."""
for engine_name in self.ExpectedEnginesToBuild(run_params):
expect_run = (engine_name in self.ExpectedEnginesToRun(run_params))
self.VerifyRunForEngine(engine_name, graph_state, expect_run)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build, implemented by subclass."""
raise NotImplementedError()
def ExpectedEnginesToRun(self, run_params):
"""Return the expected engines to run."""
return self.ExpectedEnginesToBuild(run_params)
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def _GetParamsCached(self):
if self._trt_test_params is None:
self._trt_test_params = self.GetParams()
return self._trt_test_params
def _PrepareRun(self, graph_state):
"""Set up necessary testing environment before calling sess.run()."""
# Clear test values added by TRTEngineOp.
trt_convert.clear_test_values("my_trt_op_.*:ExecuteTrtEngine")
trt_convert.clear_test_values("my_trt_op_.*:ExecuteCalibration")
trt_convert.clear_test_values("my_trt_op_.*:ExecuteNativeSegment")
def _GetConfigProto(self, run_params, graph_state):
"""Get config proto based on specific settings."""
if graph_state != GraphState.ORIGINAL and run_params.use_optimizer:
conversion_params = self.GetConversionParams(run_params)
rewriter_cfg = trt_convert.tensorrt_rewriter_config(
conversion_params.rewriter_config, conversion_params.max_batch_size,
conversion_params.max_workspace_size_bytes,
conversion_params.precision_mode,
conversion_params.minimum_segment_size,
conversion_params.is_dynamic_op,
conversion_params.maximum_cached_engines,
conversion_params.cached_engine_batch_sizes)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
else:
graph_options = config_pb2.GraphOptions()
gpu_options = config_pb2.GPUOptions()
gpu_options.allow_growth = True
if trt_convert.get_linked_tensorrt_version()[0] == 3:
gpu_options.per_process_gpu_memory_fraction = 0.50
config = config_pb2.ConfigProto(
gpu_options=gpu_options, graph_options=graph_options)
return config
def _ExpectTestValue(self, engine_name, method, expected_value):
label = "%s:%s" % (engine_name, method)
actual_value = trt_convert.get_test_value(label)
self.assertEqual(
expected_value,
actual_value,
msg="Unexpected test value with label %s. Actual: %s; expected: %s" %
(label, actual_value, expected_value))
def _ExpectCalibration(self, engine_name, value):
self._ExpectTestValue(engine_name, "ExecuteCalibration", value)
def _ExpectTrtEngine(self, engine_name, value):
self._ExpectTestValue(engine_name, "ExecuteTrtEngine", value)
def _ExpectNativeSegment(self, engine_name, value):
self._ExpectTestValue(engine_name, "ExecuteNativeSegment", value)
def _RunGraph(self,
run_params,
gdef,
input_data,
config,
graph_state,
num_runs=2):
"""Run given graphdef multiple times."""
params = self._GetParamsCached()
assert len(params.input_names) == len(input_data)
g = ops.Graph()
with g.as_default():
io_ops = importer.import_graph_def(
graph_def=gdef,
return_elements=params.input_names + params.output_names,
name="")
inputs = [op.outputs[0] for op in io_ops[:len(params.input_names)]]
assert len(inputs) == len(input_data)
outputs = [op.outputs[0] for op in io_ops[len(params.input_names):]]
with self.test_session(
graph=g, config=config, use_gpu=True, force_gpu=True) as sess:
val = None
# Defaults to 2 runs to verify result across multiple runs is same.
for _ in range(num_runs):
self._PrepareRun(graph_state)
new_val = sess.run(
outputs, {inputs[i]: input_data[i] for i in range(len(inputs))})
output_len = len(params.expected_output_dims)
self.assertEqual(output_len, len(new_val))
for i in range(output_len):
self.assertEqual(params.expected_output_dims[i], new_val[i].shape)
if val is not None:
self.assertAllClose(val, new_val, atol=1.e-06, rtol=1.e-06)
val = new_val
self.VerifyRun(run_params, graph_state)
return val
# Use real data that is representative of the inference dataset
# for calibration. For this test script it is random data.
def _RunCalibration(self, run_params, gdef, input_data, config):
"""Run calibration on given graph."""
return self._RunGraph(
run_params, gdef, input_data, config, GraphState.CALIBRATE, num_runs=5)
def _GetTrtGraphDef(self, run_params, gdef):
"""Return trt converted graphdef."""
params = self._GetParamsCached()
conversion_params = self.GetConversionParams(run_params)
logging.info(conversion_params)
return trt_convert.create_inference_graph(
input_graph_def=gdef,
outputs=params.input_names + params.output_names,
max_batch_size=conversion_params.max_batch_size,
max_workspace_size_bytes=conversion_params.max_workspace_size_bytes,
precision_mode=conversion_params.precision_mode,
minimum_segment_size=conversion_params.minimum_segment_size,
is_dynamic_op=conversion_params.is_dynamic_op,
maximum_cached_engines=conversion_params.maximum_cached_engines,
cached_engine_batch_sizes=conversion_params.cached_engine_batch_sizes,
rewriter_config=conversion_params.rewriter_config)
def _WriteGraph(self, run_params, gdef, graph_state):
if graph_state == GraphState.ORIGINAL:
label = "Original"
elif graph_state == GraphState.CALIBRATE:
label = "CalibEngine"
elif graph_state == GraphState.INFERENCE:
label = "InferEngine"
graph_name = (
self.__class__.__name__ + "_" + run_params.test_name + "_" + label +
".pbtxt")
temp_dir = os.getenv("TRT_TEST_TMPDIR", self.get_temp_dir())
if temp_dir:
logging.info("Writing graph to %s/%s", temp_dir, graph_name)
graph_io.write_graph(gdef, temp_dir, graph_name)
def _VerifyConnections(self, expected_engines, converted_gdef):
params = self._GetParamsCached()
old_to_new_node_map = {
self._ToString(node.name): self._ToString(node.name)
for node in params.gdef.node
}
for engine_name, node_names in expected_engines.items():
for node_name in node_names:
old_to_new_node_map[node_name] = engine_name
name_to_node_map = {
self._ToString(node.name): node for node in params.gdef.node
}
def _InputName(inp):
inp = self._ToString(inp)
prefix = ""
if inp[0] == "^":
prefix = "^"
inp = inp[1:]
parts = inp.split(":")
if len(parts) > 1 and parts[-1].isdigit():
inp = inp[:-len(parts[-1]) - 1]
return (prefix, inp)
expected_input_map = {}
for node in params.gdef.node:
name_str = self._ToString(node.name)
target_node_name = old_to_new_node_map[name_str]
is_engine_op = (target_node_name != name_str)
if target_node_name not in expected_input_map:
expected_input_map[target_node_name] = set()
input_set = expected_input_map[target_node_name]
for inp in node.input:
(prefix, inp_name) = _InputName(inp)
# Add the input only if it's outside the segment (note that it could be
# in a different engine).
if (not is_engine_op or
old_to_new_node_map[inp_name] != target_node_name):
if is_engine_op and name_to_node_map[inp_name].op == "Const":
            # Const data input nodes to the segment have been copied to the
            # segment graphdef and the engine, and the dependency has been
            # converted to a control dependency.
input_set.add("^" + old_to_new_node_map[inp_name])
else:
input_set.add(prefix + old_to_new_node_map[inp_name])
actual_input_map = {}
for node in converted_gdef.node:
name_str = self._ToString(node.name)
actual_input_map[name_str] = set()
input_set = actual_input_map[name_str]
for inp in node.input:
(prefix, node_name) = _InputName(inp)
input_set.add(prefix + node_name)
self.assertEqual(
expected_input_map,
actual_input_map,
msg="expected:\n%s\nvs actual:\n%s" % (sorted(
expected_input_map.items()), sorted(actual_input_map.items())))
def _VerifyGraphDef(self, run_params, gdef, graph_state):
self._WriteGraph(run_params, gdef, graph_state)
expected_engines = self.ExpectedEnginesToBuild(run_params)
num_engines = 0
for node in gdef.node:
if node.op == "TRTEngineOp":
logging.info("Found TRTEngineOp: " + node.name)
for node in gdef.node:
if node.op == "TRTEngineOp":
num_engines += 1
self.assertTrue(node.name in expected_engines, node.name)
self.assertTrue(len(node.attr["serialized_segment"].s), node.name)
self.assertTrue(len(node.attr["segment_funcdef_name"].s), node.name)
self.assertEqual(
self._ToBytes(run_params.precision_mode),
node.attr["precision_mode"].s, node.name)
is_dynamic_engine = not node.attr["static_engine"].b
self.assertEqual(run_params.dynamic_engine, is_dynamic_engine,
node.name)
has_calibration_data = len(node.attr["calibration_data"].s)
if (IsQuantizationMode(run_params.precision_mode) and
graph_state == GraphState.INFERENCE):
self.assertTrue(has_calibration_data, node.name)
else:
self.assertFalse(has_calibration_data, node.name)
if graph_state == GraphState.ORIGINAL:
self.assertEqual(0, num_engines)
else:
self.assertEqual(num_engines, len(expected_engines))
if isinstance(expected_engines, dict):
self._VerifyConnections(expected_engines, gdef)
# TODO(aaroey): consider verifying the corresponding TF function.
def RunTest(self, run_params):
if not self.ShouldRunTest(run_params):
return
assert run_params.precision_mode in PRECISION_MODES
np.random.seed(12345)
params = self._GetParamsCached()
input_gdef = params.gdef
input_dtypes = {}
for node in input_gdef.node:
if self._ToString(node.name) in params.input_names:
assert self._ToString(node.op) == "Placeholder"
input_dtypes[self._ToString(node.name)] = (
dtypes.as_dtype(node.attr["dtype"].type).as_numpy_dtype())
assert len(params.input_names) == len(input_dtypes)
input_data = []
for i in range(len(params.input_names)):
dtype = input_dtypes[params.input_names[i]]
# Multiply the input by some constant to avoid all zeros input for integer
# types.
scale = 10.0 if np.issubdtype(dtype, np.integer) else 1.0
dims = params.input_dims[i]
input_data.append((scale * np.random.random_sample(dims)).astype(dtype))
self._VerifyGraphDef(run_params, input_gdef, GraphState.ORIGINAL)
# Get reference result without running trt.
config_no_trt = self._GetConfigProto(run_params, GraphState.ORIGINAL)
logging.info("Running original graph w/o trt, config:\n%s",
str(config_no_trt))
ref_result = self._RunGraph(run_params, input_gdef, input_data,
config_no_trt, GraphState.ORIGINAL)
# Run calibration if necessary.
if IsQuantizationMode(run_params.precision_mode):
calib_config = self._GetConfigProto(run_params, GraphState.CALIBRATE)
logging.info("Running calibration graph, config:\n%s", str(calib_config))
if run_params.use_optimizer:
result = self._RunCalibration(run_params, input_gdef, input_data,
calib_config)
else:
calib_gdef = self._GetTrtGraphDef(run_params, input_gdef)
self._VerifyGraphDef(run_params, calib_gdef, GraphState.CALIBRATE)
result = self._RunCalibration(run_params, calib_gdef, input_data,
calib_config)
infer_gdef = trt_convert.calib_graph_to_infer_graph(
calib_gdef, run_params.dynamic_engine)
self._VerifyGraphDef(run_params, infer_gdef, GraphState.INFERENCE)
self.assertAllClose(
ref_result,
result,
atol=self.ExpectedAbsoluteTolerance(run_params),
rtol=self.ExpectedRelativeTolerance(run_params))
else:
infer_gdef = input_gdef
# Run inference.
infer_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
logging.info("Running final inference graph, config:\n%s",
str(infer_config))
if not run_params.use_optimizer:
infer_gdef = self._GetTrtGraphDef(run_params, infer_gdef)
self._VerifyGraphDef(run_params, infer_gdef, GraphState.INFERENCE)
result = self._RunGraph(run_params, infer_gdef, input_data, infer_config,
GraphState.INFERENCE)
self.assertAllClose(
ref_result,
result,
atol=self.ExpectedAbsoluteTolerance(run_params),
rtol=self.ExpectedRelativeTolerance(run_params))
def testIdempotence(self):
# Test that applying tensorrt optimizer or offline conversion tools multiple
# times to the same graph will result in same graph.
#
# TODO(aaroey): currently the conversion is not deterministic, this is
# mainly because during tensorflow::ConvertGraphDefToGraph(), the graph uses
# EdgeSet which use a map keyed by Edge*, so the order of input/output edges
# of a node is nondeterministic, thus the order for segmenter to contract
# edges is nondeterministic. Need to evaluate whether we should fix this.
pass
def _AddTests(test_class):
  """Adds test methods to TfTrtIntegrationTestBase.

  Generates one testTfTrt_<name> method per combination of conversion path
  (grappler optimizer vs. offline tool), precision mode, and engine type,
  skipping INT8 combinations that are not supported yet.
  """
  def _GetTest(run_params):
    """Gets a single test method based on the parameters."""
    def _Test(self):
      logging.info(
          "Running test %s with parameters: use_optimizer=%s, "
          "precision_mode=%s, dynamic_engine=%s",
          "testTfTrt_" + run_params.test_name, run_params.use_optimizer,
          run_params.precision_mode, run_params.dynamic_engine)
      self.RunTest(run_params)
    return _Test
  use_optimizer_options = [False, True]
  dynamic_engine_options = [False, True]
  for (use_optimizer, precision_mode, dynamic_engine) in itertools.product(
      use_optimizer_options, PRECISION_MODES, dynamic_engine_options):
    if IsQuantizationMode(precision_mode):
      if use_optimizer:
        # TODO(aaroey): if use_optimizer is True we need to get the inference
        # graphdef using custom python wrapper class, which is not currently
        # supported yet.
        continue
      if not dynamic_engine:
        # TODO(aaroey): construction of static calibration engine is not
        # supported yet.
        continue
    conversion = "OptimizerConversion" if use_optimizer else "ToolConversion"
    engine_type = ("DynamicEngine" if dynamic_engine else "StaticEngine")
    test_name = "%s_%s_%s" % (conversion, precision_mode, engine_type)
    run_params = RunParams(
        use_optimizer=use_optimizer,
        precision_mode=precision_mode,
        dynamic_engine=dynamic_engine,
        test_name=test_name)
    # attach the generated method under a descriptive, per-combination name
    setattr(test_class, "testTfTrt_" + test_name, _GetTest(run_params))
if trt_convert.is_tensorrt_enabled():
_AddTests(TfTrtIntegrationTestBase)
| {
"content_hash": "89c012fbbeecc6914b8b9e8c6c76bc6e",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 80,
"avg_line_length": 38.68105065666041,
"alnum_prop": 0.6634330891982345,
"repo_name": "dongjoon-hyun/tensorflow",
"id": "a725d0651c92fe18bcfd284cffd40cdfec2e6c69",
"size": "21306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tensorrt/test/tf_trt_integration_test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import copy
from osc_lib.tests import utils as osc_lib_utils
from openstackclient import shell
from openstackclient.tests.unit.integ import base as test_base
from openstackclient.tests.unit import test_shell
class TestIntegV2ProjectID(test_base.TestInteg):
    """Identity v2 authentication scoped by project ID.

    Verifies that the project ID from the environment or the command line
    ends up as ``tenantId`` in the v2 auth request body.
    """

    def setUp(self):
        super(TestIntegV2ProjectID, self).setUp()
        fake_env = {
            "OS_AUTH_URL": test_base.V2_AUTH_URL,
            "OS_PROJECT_ID": test_shell.DEFAULT_PROJECT_ID,
            "OS_USERNAME": test_shell.DEFAULT_USERNAME,
            "OS_PASSWORD": test_shell.DEFAULT_PASSWORD,
            "OS_IDENTITY_API_VERSION": "2",
        }
        self.useFixture(osc_lib_utils.EnvFixture(copy.deepcopy(fake_env)))
        self.token = test_base.make_v2_token(self.requests_mock)

    def _assert_discovery_first(self):
        """Assert at least one request was made and discovery was hit first."""
        history = self.requests_mock.request_history
        self.assertNotEqual(0, len(history))
        self.assertEqual(test_base.V2_AUTH_URL, history[0].url)

    def test_project_id_env(self):
        shell.OpenStackShell().run("extension list".split())
        self._assert_discovery_first()
        # The second request carries the auth payload.
        auth_body = self.requests_mock.request_history[1].json()
        self.assertEqual(
            test_shell.DEFAULT_PROJECT_ID,
            auth_body['auth']['tenantId'],
        )

    def test_project_id_arg(self):
        shell.OpenStackShell().run(
            "--os-project-id wsx extension list".split())
        self._assert_discovery_first()
        # The CLI argument must override the project ID used for auth.
        auth_body = self.requests_mock.request_history[1].json()
        self.assertEqual("wsx", auth_body['auth']['tenantId'])
class TestIntegV2ProjectName(test_base.TestInteg):
    """Identity v2 authentication scoped by project name.

    Verifies that the project name from the environment or the command line
    ends up as ``tenantName`` in the v2 auth request body.
    """

    def setUp(self):
        super(TestIntegV2ProjectName, self).setUp()
        fake_env = {
            "OS_AUTH_URL": test_base.V2_AUTH_URL,
            "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME,
            "OS_USERNAME": test_shell.DEFAULT_USERNAME,
            "OS_PASSWORD": test_shell.DEFAULT_PASSWORD,
            "OS_IDENTITY_API_VERSION": "2",
        }
        self.useFixture(osc_lib_utils.EnvFixture(copy.deepcopy(fake_env)))
        self.token = test_base.make_v2_token(self.requests_mock)

    def _check_tenant_name(self, expected):
        """Assert discovery was hit first and tenantName matches expected."""
        history = self.requests_mock.request_history
        self.assertNotEqual(0, len(history))
        self.assertEqual(test_base.V2_AUTH_URL, history[0].url)
        auth_body = history[1].json()
        self.assertEqual(expected, auth_body['auth']['tenantName'])

    def test_project_name_env(self):
        shell.OpenStackShell().run("extension list".split())
        self._check_tenant_name(test_shell.DEFAULT_PROJECT_NAME)

    def test_project_name_arg(self):
        # The CLI argument must override the environment project name.
        shell.OpenStackShell().run(
            "--os-project-name qaz extension list".split())
        self._check_tenant_name("qaz")
class TestIntegV3ProjectID(test_base.TestInteg):
    """Identity v3 authentication with the project scope given by ID.

    The assertions confirm that no v2-style ``tenantId``/``tenantName``
    keys leak into a v3 auth request body.
    """

    def setUp(self):
        super(TestIntegV3ProjectID, self).setUp()
        env = {
            "OS_AUTH_URL": test_base.V3_AUTH_URL,
            # FIX: previously set to test_shell.DEFAULT_PROJECT_NAME, which
            # was inconsistent with the v2 ProjectID test above — a project
            # *ID* variable should carry the project ID constant.
            "OS_PROJECT_ID": test_shell.DEFAULT_PROJECT_ID,
            # "OS_PROJECT_DOMAIN_ID": test_shell.DEFAULT_PROJECT_DOMAIN_ID,
            # "OS_USER_DOMAIN_ID": test_shell.DEFAULT_USER_DOMAIN_ID,
            "OS_USERNAME": test_shell.DEFAULT_USERNAME,
            "OS_PASSWORD": test_shell.DEFAULT_PASSWORD,
            "OS_IDENTITY_API_VERSION": "3",
        }
        self.useFixture(osc_lib_utils.EnvFixture(copy.deepcopy(env)))
        self.token = test_base.make_v3_token(self.requests_mock)

    def test_project_id_env(self):
        _shell = shell.OpenStackShell()
        _shell.run("extension list".split())
        # At least one request must have been made.
        self.assertNotEqual(len(self.requests_mock.request_history), 0)
        # The first request must hit the discovery endpoint.
        self.assertEqual(
            test_base.V3_AUTH_URL,
            self.requests_mock.request_history[0].url,
        )
        # A v3 auth body must not contain v2 tenant keys.
        auth_req = self.requests_mock.request_history[1].json()
        self.assertIsNone(auth_req['auth'].get('tenantId', None))
        self.assertIsNone(auth_req['auth'].get('tenantName', None))

    def test_project_id_arg(self):
        _shell = shell.OpenStackShell()
        _shell.run("--os-project-id wsx extension list".split())
        # At least one request must have been made.
        self.assertNotEqual(len(self.requests_mock.request_history), 0)
        # The first request must hit the discovery endpoint.
        self.assertEqual(
            test_base.V3_AUTH_URL,
            self.requests_mock.request_history[0].url,
        )
        # A v3 auth body must not contain v2 tenant keys.
        auth_req = self.requests_mock.request_history[1].json()
        self.assertIsNone(auth_req['auth'].get('tenantId', None))
        self.assertIsNone(auth_req['auth'].get('tenantName', None))
class TestIntegV3ProjectName(test_base.TestInteg):
    """Identity v3 authentication with the project scope given by name.

    Checks that the project name lands under the v3 scope structure and
    that no v2-style tenant keys appear in the auth request body.
    """

    def setUp(self):
        super(TestIntegV3ProjectName, self).setUp()
        fake_env = {
            "OS_AUTH_URL": test_base.V3_AUTH_URL,
            "OS_PROJECT_NAME": test_shell.DEFAULT_PROJECT_NAME,
            # "OS_PROJECT_DOMAIN_ID": test_shell.DEFAULT_PROJECT_DOMAIN_ID,
            # "OS_USER_DOMAIN_ID": test_shell.DEFAULT_USER_DOMAIN_ID,
            "OS_USERNAME": test_shell.DEFAULT_USERNAME,
            "OS_PASSWORD": test_shell.DEFAULT_PASSWORD,
            "OS_IDENTITY_API_VERSION": "3",
        }
        self.useFixture(osc_lib_utils.EnvFixture(copy.deepcopy(fake_env)))
        self.token = test_base.make_v3_token(self.requests_mock)

    def _check_scoped_request(self, expected_name):
        """Assert discovery was hit first and the auth body is v3-scoped."""
        history = self.requests_mock.request_history
        self.assertNotEqual(0, len(history))
        self.assertEqual(test_base.V3_AUTH_URL, history[0].url)
        body = history[1].json()
        self.assertEqual(
            expected_name, body['auth']['scope']['project']['name'])
        # No v2-style tenant keys may appear in a v3 request.
        self.assertIsNone(body['auth'].get('tenantId', None))
        self.assertIsNone(body['auth'].get('tenantName', None))

    def test_project_name_env(self):
        shell.OpenStackShell().run("extension list".split())
        self._check_scoped_request(test_shell.DEFAULT_PROJECT_NAME)

    def test_project_name_arg(self):
        # The CLI argument must override the environment project name.
        shell.OpenStackShell().run(
            "--os-project-name wsx extension list".split())
        self._check_scoped_request("wsx")
| {
"content_hash": "9393d3d723659bcc19f13ac92ea315d9",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 75,
"avg_line_length": 32.19591836734694,
"alnum_prop": 0.6005324543610547,
"repo_name": "dtroyer/python-openstackclient",
"id": "4e707a376286f14bef14d21d1f48c36a571bf642",
"size": "8452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/tests/unit/integ/cli/test_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4040230"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF


class RegionTest(base.EC2TestCase):
    """Smoke checks for the EC2 region and availability-zone listings."""

    def test_describe_regions(self):
        response = self.client.describe_regions()
        self.assertNotEmpty(response['Regions'])
        configured = CONF.aws.aws_region
        if configured:
            # When a region is configured, it must appear in the listing.
            names = [item['RegionName'] for item in response['Regions']]
            self.assertIn(configured, names)

    def test_describe_zones(self):
        response = self.client.describe_availability_zones()
        self.assertNotEmpty(response['AvailabilityZones'])
        configured = CONF.aws.aws_region
        if not configured:
            return
        # TODO(andrey-mp): add checking of other fields of returned data
| {
"content_hash": "3af08dea3d505d4b820102dc6be390eb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.649867374005305,
"repo_name": "vishnu-kumar/ec2-api",
"id": "cabda0312d3338502f3163c8627ba57300af421e",
"size": "1390",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ec2api/tests/functional/api/test_regions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1702647"
},
{
"name": "Shell",
"bytes": "29444"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import featureditem.fields
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    """Add splash-art image fields to Collection and convert ``featured``
    columns to the custom ``FeaturedField`` type.
    """
    dependencies = [
        ('manifestos', '0004_auto_20150513_2110'),
    ]
    operations = [
        # Image field whose dimensions are cached in art_height/art_width
        # via height_field/width_field, avoiding re-reading the file.
        migrations.AddField(
            model_name='collection',
            name='art_file',
            field=sorl.thumbnail.fields.ImageField(height_field='art_height', upload_to='img', width_field='art_width', verbose_name='splash art file', blank=True),
        ),
        migrations.AddField(
            model_name='collection',
            name='art_height',
            field=models.PositiveSmallIntegerField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='collection',
            name='art_width',
            field=models.PositiveSmallIntegerField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='collection',
            name='featured',
            field=featureditem.fields.FeaturedField(default=False),
        ),
        # Switch existing ``featured`` columns (including the history model)
        # to FeaturedField; default stays False.
        migrations.AlterField(
            model_name='historicalmanifesto',
            name='featured',
            field=featureditem.fields.FeaturedField(default=False),
        ),
        migrations.AlterField(
            model_name='manifesto',
            name='featured',
            field=featureditem.fields.FeaturedField(default=False),
        ),
    ]
| {
"content_hash": "400e01174e9f46b836aa8418b1048af7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 164,
"avg_line_length": 32.31111111111111,
"alnum_prop": 0.5990371389270976,
"repo_name": "gwhigs/digital-manifesto",
"id": "80625f949fc2ca23184d8077e8509ac5597e29ba",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manifestos/migrations/0005_auto_20150825_1913.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9511"
},
{
"name": "Dockerfile",
"bytes": "193"
},
{
"name": "HTML",
"bytes": "25972"
},
{
"name": "JavaScript",
"bytes": "6833"
},
{
"name": "Python",
"bytes": "62072"
},
{
"name": "Shell",
"bytes": "6094"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import dotmanca.storage
import gallery.models
class Migration(migrations.Migration):
    """Re-point GalleryImage.the_image at OverwriteStorage, presumably so a
    re-upload replaces the stored file rather than getting a suffixed name.
    """
    dependencies = [
        ('gallery', '0003_auto_20170701_1436'),
    ]
    operations = [
        migrations.AlterField(
            model_name='galleryimage',
            name='the_image',
            # NOTE(review): ``storage`` receives the OverwriteStorage *class*,
            # not an instance — Django conventionally expects an instance;
            # confirm this is intentional before copying the pattern.
            field=models.ImageField(storage=dotmanca.storage.OverwriteStorage, upload_to=gallery.models.gallery_image_upload_to),
        ),
    ]
| {
"content_hash": "3745263a7294d28e1928949334ea8550",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 129,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.6627680311890838,
"repo_name": "evanepio/dotmanca",
"id": "1f96c9ed156866ff7d70e4ad68866e307175d321",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gallery/migrations/0004_auto_20170705_2030.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2704"
},
{
"name": "Dockerfile",
"bytes": "1874"
},
{
"name": "HTML",
"bytes": "12635"
},
{
"name": "Makefile",
"bytes": "192"
},
{
"name": "Python",
"bytes": "83822"
},
{
"name": "Shell",
"bytes": "860"
}
],
"symlink_target": ""
} |
from ctypes import *
import os
import subprocess
import unittest
import sys
import gdb_rsp
import test_files.test_basic as test_basic
# These are set up by Main().
COMMAND = None


class Tests(unittest.TestCase):
    """Exercises the GDB-RSP session lifecycle against the debug stub."""

    def test_disconnect(self):
        stub = gdb_rsp.PopenDebugStub(COMMAND)
        try:
            # One initial connect/close plus three reconnect cycles: the
            # stub must accept a fresh connection after every disconnect.
            for _ in range(4):
                conn = gdb_rsp.GdbRspConnection()
                conn.Close()
        finally:
            gdb_rsp.KillProcess(stub)

    def test_kill(self):
        stub = gdb_rsp.PopenDebugStub(COMMAND)
        try:
            conn = gdb_rsp.GdbRspConnection()
            # Request killing the target.
            self.assertEqual('OK', conn.RspRequest('k'))
            # wait() status is truncated to a signed byte before comparing.
            self.assertEqual(gdb_rsp.RETURNCODE_KILL,
                             c_byte(stub.wait()).value)
        finally:
            gdb_rsp.KillProcess(stub)

    def test_detach(self):
        stub = gdb_rsp.PopenDebugStub(COMMAND)
        try:
            conn = gdb_rsp.GdbRspConnection()
            # 'D' detaches and resumes the target, so it exits normally.
            self.assertEqual('OK', conn.RspRequest('D'))
        finally:
            gdb_rsp.KillProcess(stub)
def Main():
    """Split argv on '--': left side goes to unittest, right side is the
    debug-stub command line stored in the COMMAND global."""
    sep = sys.argv.index('--')
    global COMMAND
    COMMAND = sys.argv[sep + 1:]
    unittest.main(argv=sys.argv[:sep])


if __name__ == '__main__':
    Main()
| {
"content_hash": "144e12f0e62b062a960753bbc80c7666",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 67,
"avg_line_length": 25.360655737704917,
"alnum_prop": 0.6593406593406593,
"repo_name": "youtube/cobalt_sandbox",
"id": "9fd9628b5a6b9463f351ca257c9e26c1064a5835",
"size": "1843",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "third_party/v8/test/debugging/wasm/gdb-server/connect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
import sklearn.naive_bayes
import submissions
from data import *
# Fit a multinomial naive Bayes classifier on the training data, predict
# labels for the test set, and write the submission CSV.
classifier = sklearn.naive_bayes.MultinomialNB().fit(train, target)
predictions = classifier.predict(test)
submissions.save_csv(predictions, "multinomial_naive_bayes.csv")
| {
"content_hash": "547ea7c6c33af300b7ce1ecfe9dfe7bb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 22.153846153846153,
"alnum_prop": 0.7708333333333334,
"repo_name": "wjfwzzc/Kaggle_Script",
"id": "33e222ec85d46f514fd810da2966840198c2af4a",
"size": "313",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "titanic/multinomial_naive_bayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73011"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import CloudRedisTransport
from .grpc import CloudRedisGrpcTransport
from .grpc_asyncio import CloudRedisGrpcAsyncIOTransport
from .rest import CloudRedisRestTransport
from .rest import CloudRedisRestInterceptor
# Registry of available transport classes, keyed by transport name.
# Insertion order is preserved (OrderedDict) so iteration matches the
# original registration order: grpc, grpc_asyncio, rest.
# type of _transport_registry: Dict[str, Type[CloudRedisTransport]]
_transport_registry = OrderedDict((
    ('grpc', CloudRedisGrpcTransport),
    ('grpc_asyncio', CloudRedisGrpcAsyncIOTransport),
    ('rest', CloudRedisRestTransport),
))
__all__ = (
    'CloudRedisTransport',
    'CloudRedisGrpcTransport',
    'CloudRedisGrpcAsyncIOTransport',
    'CloudRedisRestTransport',
    'CloudRedisRestInterceptor',
)
| {
"content_hash": "9a5fb782b536cc0ab2fd3a4ee093af2a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 33.130434782608695,
"alnum_prop": 0.8031496062992126,
"repo_name": "googleapis/gapic-generator-python",
"id": "bfee7cc29940cf18c9ae30d5868eac75fcd8a445",
"size": "1362",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/goldens/redis/google/cloud/redis_v1/services/cloud_redis/transports/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2702"
},
{
"name": "Jinja",
"bytes": "767902"
},
{
"name": "Python",
"bytes": "4802905"
},
{
"name": "Shell",
"bytes": "31013"
},
{
"name": "Starlark",
"bytes": "26281"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.core import setup
from distutils.extension import Extension
import os
import sys
import shutil
# Package metadata and layout for the SyndicateMail distribution.
setup(name='SyndicateMail',
      version='0.1',
      description='Syndicate-powered Mail',
      url='https://github.com/jcnelson/syndicatemail',
      # FIX: distutils expects a single string for author, not a list
      # (a list would be stringified verbatim into PKG-INFO).
      author='Jude Nelson, Wathsala Vithanage',
      author_email='syndicate@lists.cs.princeton.edu',
      license='Apache 2.0',
      package_dir={"syndicatemail": "."},
      packages=['syndicatemail', "syndicatemail.common", "syndicatemail.endpoint", "syndicatemail.server"],
      # TODO: auto-build the javascript, html, css, etc. from the UI directory
      # FIX: this dict was previously passed as ``data_files``, which
      # distutils requires to be a list of (directory, files) tuples and
      # would fail to unpack at install time; per-package glob mappings
      # belong in ``package_data``.
      package_data={"syndicatemail": ["syndicatemail/ui/*"]},
      zip_safe=False)
| {
"content_hash": "c73523f0b667bfd644a08aafa3c56373",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 109,
"avg_line_length": 36.916666666666664,
"alnum_prop": 0.7133182844243793,
"repo_name": "jcnelson/syndicatemail",
"id": "5dec46a376d6e11e0d3f96a983cd6d821e7cc42c",
"size": "1348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "66355"
},
{
"name": "Python",
"bytes": "213909"
},
{
"name": "Shell",
"bytes": "1433"
},
{
"name": "TeX",
"bytes": "92466"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import getopt
import glob
import sys
from PIL import Image
# With no arguments, print usage and exit.
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print("  -f  list supported file formats")
    print("  -i  show associated info and tile data")
    print("  -v  verify file headers")
    print("  -q  quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)

try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)

# Flags consulted by the identification loop below.
verbose = quiet = verify = 0

for o, a in opt:
    if o == "-f":
        # List every registered format, then exit.
        Image.init()
        # FIX: renamed from ``id`` to avoid shadowing the ``id`` builtin.
        format_ids = sorted(Image.ID)
        print("Supported formats:")
        for i in format_ids:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        Image.DEBUG += 1
def globfix(files):
    """Expand shell wildcards on Windows, where the shell does not.

    On other platforms argv arrives already expanded, so the input list
    is returned unchanged.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for name in files:
        if glob.has_magic(name):
            expanded.extend(glob.glob(name))
        else:
            expanded.append(name)
    return expanded
# Identify (and optionally verify) each file named on the command line.
for filename in globfix(args):
    try:
        im = Image.open(filename)
        print("%s:" % filename, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            except Exception:
                # FIX: narrowed from a bare ``except`` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        if not quiet:
            print(filename, "failed:", v)
    except Exception:
        # FIX: narrowed from a bare ``except``; still reports the traceback
        # for unexpected failures unless running quietly.
        import traceback
        if not quiet:
            print(filename, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
| {
"content_hash": "d541422f61c0583a433403ad10c6bd0d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 25.807692307692307,
"alnum_prop": 0.5136612021857924,
"repo_name": "mnahm5/django-estore",
"id": "e673ce97bff5f4c010cb56b3bd6fb8938accd4d1",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/pilfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "2695"
},
{
"name": "C",
"bytes": "460931"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "144496"
},
{
"name": "HTML",
"bytes": "155544"
},
{
"name": "JavaScript",
"bytes": "206799"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "24837167"
},
{
"name": "Shell",
"bytes": "4408"
},
{
"name": "Tcl",
"bytes": "1237789"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class ExampleModel(models.Model):
    """A user's up/down vote attached to an arbitrary object via a
    generic foreign key.
    """
    # Closed set of vote values; blank is also allowed on the field below,
    # presumably meaning "no vote yet" — confirm against the view logic.
    VOTE_CHOICES = (
        ("up", "Up"),
        ("down", "Down"),
    )
    # The voting user.
    user = models.ForeignKey('auth.user')
    vote = models.CharField(max_length=4, choices=VOTE_CHOICES, blank=True)
    # Generic FK triple: content_type + object_id resolve to content_object.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')
    def __unicode__(self):
        """Display as "<pk> – <user>" (en dash) in the admin and shell."""
        return u"{0} – {1}".format(self.pk, self.user)
| {
"content_hash": "11e58a36f10d88e9d680f9d7d4dfc09f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 32.68421052631579,
"alnum_prop": 0.6827697262479872,
"repo_name": "django-djam/django-djam",
"id": "d154fb46ee1f508f93b390ef0e76192facc62b04",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/test_project/example_app/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "195505"
},
{
"name": "JavaScript",
"bytes": "109927"
},
{
"name": "Python",
"bytes": "89575"
},
{
"name": "Ruby",
"bytes": "440"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""Scraper for Kentucky Supreme Court
CourtID: ky
Court Short Name: Ky.
History:
2014-08-07: Created by mlr.
2014-12-15: Updated to fetch 100 results instead of 30. For reasons unknown
this returns more recent results, while a smaller value leaves
out some of the most recent items.
Notes:
This scraper is unique. Kentucky does not provide case names in the search
results pages, making them almost useless. They have a separate interface
though that allows a lookup by case year and number, which *does* provide
the case name (and lots of other information). Note that it only provides
this information for supreme court cases, so extending this to do kyctapp
won't be possible.
Our process is therefore:
1. Get anything we can from the search results.
1. For extra meta data, query system number two and get back the case
name.
1. Merge it all.
Also fun, they use IP addresses instead of DNS and hide them behind HTML
frames hosted by real domains.
I tried calling to get more information, and though they've heard of us (a
first!), they didn't want to help, and seemed downright aggressive in their
opposition. Curious. Anyway, don't bother calling again.
You can contact support@dtsearch.com with questions about the search
interface. Best of luck.
"""
import re
from datetime import datetime
import requests
from lxml import html
from juriscraper.lib.string_utils import titlecase
from juriscraper.DeferringList import DeferringList
from juriscraper.OpinionSite import OpinionSite
class Site(OpinionSite):
    """Scraper for the Kentucky Supreme Court dtSearch interface.

    Search results lack case names, so case names are fetched lazily from a
    second docket-lookup system via a DeferringList (see module docstring).
    """
    def __init__(self):
        super(Site, self).__init__()
        self.court_id = self.__module__
        # dtSearch endpoint, addressed by bare IP (no DNS; see module notes).
        self.url = 'http://162.114.92.72/dtsearch.asp'
        self.parameters = {
            'SearchForm': '%%SearchForm%%',
            'autoStopLimit': '0',
            'cmd': 'search',
            'fuzziness': '0',
            'index': 'D:\\Inetpub\\wwwroot\\indices\\SupremeCourt_Index',
            # This can be bumped as high as you dream to get back *massive*
            # result sets.
            'maxFiles': '100',
            # This is a dtSearch trick that brings back all results.
            'request': 'xfirstword',
            # This provides things in newest-first order, but indeed shows the
            # most recent N items.
            'sort': 'Date'
        }
        self.method = 'POST'
        # Matches docket strings like "2014-SC-000123".
        self.docket_number_regex = re.compile('(?P<year>\d{4})-(?P<court>[SC]{2})-(?P<docket_num>\d+)')
        self.hrefs_contain = 'Opinions'
    def _get_download_urls(self):
        """Return the opinion PDF hrefs for anchors with valid docket numbers."""
        path = "//a[@href[contains(., '{m}')]]".format(m=self.hrefs_contain)
        elems = filter(self._has_valid_docket_number, self.html.xpath(path))
        return [e.xpath('./@href')[0] for e in elems]
    def _get_case_names(self):
        def fetcher(e):
            """This reaches out to a secondary system and scrapes the correct
            info.
            """
            # self.method is set to 'LOCAL' by the test harness; avoid
            # network access in that mode.
            if self.method == 'LOCAL':
                return "No case names fetched during tests."
            else:
                url = 'http://162.114.92.78/dockets/SearchCaseDetail.asp'
                anchor_text = html.tostring(e, method='text', encoding='unicode')
                m = self.docket_number_regex.search(anchor_text)
                r = requests.post(
                    url,
                    headers={'User-Agent': 'Juriscraper'},
                    data={
                        'txtyear': m.group('year'),
                        # Lookup form wants the docket number without
                        # leading zeros.
                        'txtcasenumber': m.group('docket_num').strip('0'),
                        'cmdnamesearh': 'Search',
                    },
                )
                # Throw an error if a bad status code is returned.
                r.raise_for_status()
                # If the encoding is iso-8859-1, switch it to cp1252 (a superset)
                if r.encoding == 'ISO-8859-1':
                    r.encoding = 'cp1252'
                # Grab the content
                text = self._clean_text(r.text)
                html_tree = html.fromstring(text)
                # And finally, we parse out the good stuff.
                parties_path = "//tr[descendant::text()[contains(., 'Appell')]]//td[3]//text()"
                case_name_parts = []
                for s in html_tree.xpath(parties_path):
                    if s.strip():
                        case_name_parts.append(titlecase(s.strip().lower()))
                    # Only the first two parties (appellant/appellee) are kept.
                    if len(case_name_parts) == 2:
                        break
                return ' v. '.join(case_name_parts)
        # Get the docket numbers to use for queries.
        path = "//a[@href[contains(., '{m}')]]".format(m=self.hrefs_contain)
        elements = filter(self._has_valid_docket_number, self.html.xpath(path))
        # DeferringList delays the per-case HTTP lookups until items are read.
        return DeferringList(seed=elements, fetcher=fetcher)
    def _get_docket_numbers(self):
        """Return normalized docket numbers for anchors that have one."""
        path = "//a[@href[contains(., '{m}')]]".format(
            m=self.hrefs_contain)
        elements = filter(self._has_valid_docket_number, self.html.xpath(path))
        return map(self._return_docket_number_from_str, elements)
    def _has_valid_docket_number(self, e):
        """True when the anchor's text contains a parsable docket number."""
        text = html.tostring(e, method='text', encoding='unicode')
        if self.docket_number_regex.search(text):
            return True
        else:
            return False
    def _return_docket_number_from_str(self, e):
        """Reformat a matched docket number as 'YYYY SC NNNNN'."""
        s = html.tostring(e, method='text', encoding='unicode')
        m = self.docket_number_regex.search(s)
        return '{year} {court} {docket_num}'.format(
            year=m.group('year'),
            court=m.group('court'),
            docket_num=m.group('docket_num')
        )
    def _get_case_dates(self):
        """Parse MM/DD/YYYY dates from the second cell of each result row."""
        path = "//tr[descendant::a[@href[contains(., '{m}')]]]/td[2]".format(
            m=self.hrefs_contain)
        elements = filter(self._has_valid_docket_number, self.html.xpath(path))
        dates = []
        for e in elements:
            for s in e.xpath('.//text()'):
                s = s.strip()
                try:
                    dates.append(datetime.strptime(s, '%m/%d/%Y').date())
                except ValueError:
                    # Non-date text nodes in the cell are skipped.
                    pass
        return dates
    def _get_precedential_statuses(self):
        """The search results carry no status information, so report Unknown."""
        # noinspection PyUnresolvedReferences
        return ['Unknown'] * len(self.case_names)
| {
"content_hash": "9d25a06e0f5c951d0d349264dff50f2b",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 103,
"avg_line_length": 39.93125,
"alnum_prop": 0.5686335889810612,
"repo_name": "brianwc/juriscraper",
"id": "1aca59bcdb7bc0293f9547168703dfa37248c71e",
"size": "6389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opinions/united_states/state/ky.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "24111143"
},
{
"name": "Python",
"bytes": "661024"
}
],
"symlink_target": ""
} |
import datetime
import collections
from .config import Config
class Stmt(object):
    """Base class for all statement classes.
    """
    def __init__(self, placeholder=None, quote_all_col_refs=None, quote_all_values=None, **kwargs):
        """Constructor
        Keyword Arguments:
            placeholder (string, optional): Placeholder character to use when parameterization is enabled.
                Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
            quote_all_col_refs (bool, optional): Quote all column references.
                Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
            quote_all_values (bool, optional): Quote all non-parameterized values.
                Default is None, in which case the :py:class:`mysqlstmt.config.Config` setting will be used.
            **kwargs: Base class arguments.
        Note:
            Default settings for ``placeholder``, ``quote_all_col_refs`` and ``quote_all_values``
            are set through :py:class:`mysqlstmt.config.Config`
        """
        super(Stmt, self).__init__(**kwargs)
        # An explicit False (here or in Config) disables the feature even if
        # the other source would enable it; otherwise None falls back to Config.
        if placeholder is False or Config.placeholder is False:
            self.placeholder = False
        else:
            self.placeholder = Config.placeholder if placeholder is None else placeholder
        if quote_all_values is False or Config.quote_all_values is False:
            self.quote_all_values = False
        else:
            self.quote_all_values = Config.quote_all_values if quote_all_values is None else quote_all_values
        if quote_all_col_refs is False or Config.quote_all_col_refs is False:
            self.quote_all_col_refs = False
        else:
            self.quote_all_col_refs = Config.quote_all_col_refs if quote_all_col_refs is None else quote_all_col_refs
        # Public properties
        self.query_options = [] # can append with ``set_option``
    def __call__(self, *args, **kwargs):
        """Returns SQL statement created by :py:meth:`sql`"""
        return self.sql()
    def __str__(self):
        """Returns SQL statement created by :py:meth:`sql`"""
        # When parameterization is active, sql() returns a tuple; only the
        # SQL text portion is returned here.
        sql_t = self.sql()
        return sql_t[0] if self.placeholder else sql_t
    def sql(self):
        """Derived classes must override and build appropriate SQL statement.
        Returns:
            Either a tuple ``(SQL statement, parameterized values)`` if ``placeholder`` is set,
            otherwise SQL statement as string.
        Raises:
            ValueError: The statement cannot be created with the given attributes.
            NotImplementedError: There is no base class implementation.
        """
        raise NotImplementedError
    def quote_col_ref(self, col_ref):
        """Quote column reference with backticks.
        Arguments:
            col_ref (string): Column reference. Can be prefixed with the table name.
        Returns:
            string: Column reference quoted with backticks (``).
        Notes:
            Column reference will not be quoted if it contains a backtick, space or parenthesis.
        """
        if self.quote_all_col_refs:
            if ' ' in col_ref:
                return col_ref  # COLUMN AS ALIAS
            if '(' in col_ref:
                return col_ref  # FUNCTION(COLUMN)
            if '`' in col_ref:
                return col_ref  # already quoted
            # Only the column part of "table.column" is backtick-quoted.
            col_ref_parts = col_ref.split('.')
            if len(col_ref_parts) > 1:
                table, col = col_ref_parts
                return '{0}.`{1}`'.format(table, col)
            else:
                return '`{0}`'.format(col_ref)
        return col_ref
    def pickle(self, val):
        """Convert variable value into a value that can be included in a SQL statement.
        Arguments:
            val (mixed): Value to pickle.
        Returns:
            tuple: (string, bool) Pickled value as a string and True if value should be parameterized.
        """
        if val is None:
            return 'NULL', False
        elif val is True:
            return '1', False
        elif val is False:
            return '0', False
        elif isinstance(val, basestring):
            return val, True
        elif isinstance(val, (int, long, float)):
            return str(val), False
        # datetime is checked before date because datetime.datetime is a
        # subclass of datetime.date.
        elif isinstance(val, datetime.datetime):
            return val.strftime('%Y-%m-%d %H:%M:%S'), True
        elif isinstance(val, datetime.date):
            return val.strftime('%Y-%m-%d'), True
        elif isinstance(val, datetime.time):
            return val.strftime('%H:%M:%S'), True
        # Fallback: any other type is stringified and parameterized.
        return unicode(val), True
    @staticmethod
    def quote(val):
        """Quotes a string with single quotemarks and adds backslashes to escape embedded single quotes.
        Arguments:
            val (string): Value to quote.
        Returns:
            string: Value wrapped in single quotes with embedded single quotes escaped.
        Note:
            This is a very simple implementation. Conventional wisdom says you should *never* need
            to use this functionality. Whenever possible you should use parameterization,
            or escape values before they get to creating SQL statments.
        """
        return u"'{0}'".format(val.replace("'", "\\'"))
    @staticmethod
    def table_alias(table_factor):
        """Returns the table alias from a table factor.
        Arguments:
            table_factor (string): Table factor reference such as ``table`` or ``table AS alias``.
        Returns:
            string
        """
        # NOTE(review): splits on the literal substring 'AS' anywhere in the
        # string, not just the keyword surrounded by spaces — verify inputs.
        table_parts = table_factor.split('AS')
        return table_factor if len(table_parts) == 1 else table_parts[1].strip()
    def _parameterize_values(self, list_or_value, inline_values, param_values):
        """Parameterizes a value or list of values.
        Evaluates or iterates through ``list_or_value`` and if the value can be parameterized
        it is added to ``param_values``, otherwise it is added to ``inline_values``.
        Arguments:
            list_or_value (list or mixed): A value or list of values to replace with ``placeholder``.
            inline_values (list or None): List to append non-parameterized values to;
                set to None to force everything to be parameterized.
            param_values (list or None): List to append parameterized values to;
                set to None to force everything not to be inlined.
        """
        # Recurse into any non-string iterable; strings are scalar values.
        if isinstance(list_or_value, collections.Iterable) and not isinstance(list_or_value, basestring):
            for val in list_or_value:
                self._parameterize_values(val, inline_values, param_values)
        else:
            using_placeholder = False if (param_values is None) else bool(self.placeholder)
            # Quoting only applies when the value is being inlined.
            quote = False if using_placeholder is True else self.quote_all_values
            list_or_value, can_paramize_val = self.pickle(list_or_value)
            if inline_values is None:
                param_values.append(list_or_value)
            elif can_paramize_val and using_placeholder:
                inline_values.append(self.placeholder)
                param_values.append(list_or_value)
            elif can_paramize_val and quote:
                inline_values.append(self.quote(list_or_value))
            else:
                inline_values.append(list_or_value)
    def set_option(self, list_or_value):
        """Sets query options (the keywords at the beginning of the SQL statement).
        Arguments:
            list_or_value (list or mixed): An option or list of options.
        Returns:
            object: self
        """
        if isinstance(list_or_value, collections.Iterable) and not isinstance(list_or_value, basestring):
            for val in list_or_value:
                self.set_option(val)
        else:
            self.query_options.append(list_or_value)
        return self
| {
"content_hash": "4dcd7e917ac55772f516c756284dec2d",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 117,
"avg_line_length": 39.666666666666664,
"alnum_prop": 0.6009030477862787,
"repo_name": "lovette/mysqlstmt",
"id": "48e7da4eff971bc571771a9f87cb7970c6c1d5f4",
"size": "7998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysqlstmt/stmt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "159914"
}
],
"symlink_target": ""
} |
from google.cloud import dialogflowcx_v3beta1
def sample_get_environment():
    """Fetch a single Dialogflow CX environment and print it."""
    # Instantiate the synchronous Environments client.
    environments_client = dialogflowcx_v3beta1.EnvironmentsClient()

    # Build the request; "name_value" is a placeholder resource name.
    get_request = dialogflowcx_v3beta1.GetEnvironmentRequest(
        name="name_value",
    )

    # Issue the RPC and show the result.
    response = environments_client.get_environment(request=get_request)
    print(response)
# [END dialogflow_v3beta1_generated_Environments_GetEnvironment_sync]
| {
"content_hash": "08e8195cce782487d9e01d16276bb596",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 25.31578947368421,
"alnum_prop": 0.7276507276507277,
"repo_name": "googleapis/python-dialogflow-cx",
"id": "b6acb5c3d47e257ffeeec8807a77eb1bdbadea4c",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v3beta1_generated_environments_get_environment_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "10904903"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
} |
import logging
from getpass import getpass
from os import path
from django.core.management.base import BaseCommand
from ...utils.download import _download, gen_monthly_report_links, login
class Command(BaseCommand):
    """Management command that downloads monthly patron reports from Patreon."""

    help = 'Download your monthly patron reports from Patreon (CSV).'

    def add_arguments(self, parser):
        # Credentials may be supplied as flags; handle() prompts for any
        # that are missing.
        parser.add_argument('-u', '--username', help="Patreon username")
        parser.add_argument('-p', '--password', help="Patreon password")
        parser.add_argument('-d', '--directory', help="save CSV reports to DIRECTORY", default='.')

    def handle(self, *args, **options):
        # Fall back to interactive prompts when flags were not given.
        username = options['username'] or input("Patreon username: ")
        password = options['password'] or getpass("Patreon password (will be hidden): ")

        if options['verbosity'] > 0:
            logging.basicConfig(level=logging.INFO)

        session = login(username, password)
        for filename, url in gen_monthly_report_links(session):
            _download(session, url, path.join(options['directory'], filename))
| {
"content_hash": "fa39fd95db4c5b7316328408b26e7b9b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 99,
"avg_line_length": 37.7,
"alnum_prop": 0.6569407603890363,
"repo_name": "DjangoGirls/djangogirls",
"id": "c5aec4b97dde3803d651ce894dfe2455be798cf2",
"size": "1131",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "patreonmanager/management/commands/download_csv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "428291"
},
{
"name": "JavaScript",
"bytes": "13711"
},
{
"name": "Python",
"bytes": "422267"
},
{
"name": "Stylus",
"bytes": "32803"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Validate that use of \glossaries in TeX source files causes SCons to
be aware of the necessary created glossary files.
Test configuration contributed by Robert Managan.
"""

import os

import TestSCons

test = TestSCons.TestSCons()

# Both a latex binary and the glossaries package are prerequisites.
latex = test.where_is('latex')
if not latex:
    test.skip_test("Could not find latex; skipping test(s).\n")

if os.system('kpsewhich glossaries.sty') != 0:
    test.skip_test("glossaries.sty not installed; skipping test(s).\n")

test.write('SConstruct', """\
import os
env = Environment()
env.PDF('glossaries', 'glossaries.tex')
""")

test.write('glossaries.tex', r"""
\documentclass{article}
\usepackage[acronym]{glossaries}
\newglossaryentry{nix}{
name={Nix},
description={Version 5}
}
\newacronym{gnu}{GNU}{GNU's Not UNIX}
\makeglossaries
\begin{document}
Acronyms \gls{gnu} and glossary entries \gls{nix}.
\printglossary[type=acronym]
\printglossary[type=main]
\end{document}
""")

test.run(arguments='.', stderr=None)

# Suffixes of every artifact a glossaries-enabled build is expected to emit.
BUILD_SUFFIXES = ('acn', 'acr', 'alg', 'aux', 'fls',
                  'glg', 'glo', 'ist', 'log', 'pdf')

for suffix in BUILD_SUFFIXES:
    test.must_exist(test.workpath('glossaries.%s' % suffix))

# A clean must remove all of them without complaining about missing files.
test.run(arguments='-c .')

unwanted = "Could not remove 'glossaries.aux': No such file or directory"
test.must_not_contain_any_line(test.stdout(), [unwanted])

for suffix in BUILD_SUFFIXES:
    test.must_not_exist(test.workpath('glossaries.%s' % suffix))

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "c70f02f8df3c6e7306705d3d53e04f7d",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 71,
"avg_line_length": 25.344086021505376,
"alnum_prop": 0.7335596096733136,
"repo_name": "timj/scons",
"id": "05ddf1284bb189c05ff6db3e252f5abd56abd50f",
"size": "3459",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/TEX/glossaries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import fnmatch
import unittest
from uuid import uuid4
# Make the local packages importable when running this script directly
# (manager.py sits next to the "webapp" package).
HERE = os.path.dirname(os.path.abspath(__file__))
WEBAPP_PATH = os.path.abspath(os.path.join(HERE, 'webapp'))
sys.path.insert(0, HERE)
sys.path.insert(1, WEBAPP_PATH)

# Coverage bootstrap: the `test` command re-execs this script with
# FLASK_COVERAGE set so that coverage collection starts *before* the
# webapp modules below are imported (their import-time code is measured).
FLASK_COVERAGE = os.environ.get('FLASK_COVERAGE', None)
if FLASK_COVERAGE:
    try:
        import coverage
    except ImportError:
        msg = 'Não é possível importar o modulo coverage'
        raise RuntimeError(msg)

# COV holds the active coverage.coverage instance, or None when disabled.
COV = None
if FLASK_COVERAGE:
    COV = coverage.coverage(branch=True, include='opac/webapp/*')
    COV.start()
else:
    COV = None

from webapp import create_app, dbsql, dbmongo, mail, cache  # noqa
from opac_schema.v1.models import Collection, Sponsor, Journal, Issue, Article, AuditLogEntry  # noqa
from webapp import controllers  # noqa
from webapp.utils import reset_db, create_db_tables, create_user, create_new_journal_page, send_audit_log_daily_report  # noqa
from webapp.utils.journal_static_page import PAGE_NAMES_BY_LANG  # noqa
from flask_script import Manager, Shell  # noqa
from flask_migrate import Migrate, MigrateCommand  # noqa
from webapp.admin.forms import EmailForm  # noqa
from webapp.tasks import setup_scheduler, clear_scheduler  # noqa

# Application, migration and CLI-manager singletons used by every command below.
app = create_app()
migrate = Migrate(app, dbsql)
manager = Manager(app)
manager.add_command('dbsql', MigrateCommand)
def make_shell_context():
    """Build the namespace exposed by the interactive `shell` command."""
    context = dict(
        app=app,
        dbsql=dbsql,
        dbmongo=dbmongo,
        mail=mail,
        cache=cache,
        Collection=Collection,
        Sponsor=Sponsor,
        Journal=Journal,
        Issue=Issue,
        Article=Article,
        AuditLogEntry=AuditLogEntry,
    )
    return context


manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
@manager.option('-f', '--force', dest='force_clear', default=False)
def invalidate_cache(force_clear=False):
    """Wipe the whole cache, asking for confirmation unless --force is given."""
    def clear_cache():
        invalidated = cache.clear()
        print('Chaves invalidadas: %s' % invalidated)
        print('Cache zerado com sucesso!')

    if force_clear:
        clear_cache()
        return

    # Ask for confirmation, re-prompting until a valid answer is given.
    while True:
        answer = input('Tem certeza que deseja limpar todo o cache? [y/N]: ').strip().lower()
        if answer == 'y':
            clear_cache()
            break
        if answer == 'n':
            print('O cache permance sem mudanças!')
            break
        print('Resposta inválida. Responda "y" ou "n" (sem aspas)')
@manager.command
@manager.option('-p', '--pattern', dest='pattern')
@manager.option('-f', '--force', dest='force_clear', default=False)
def invalidate_cache_pattern(pattern, force_clear=False):
    """Delete cache keys matching a redis glob *pattern*, with confirmation unless forced."""
    _redis_cli = cache.cache._client

    def count_key_pattern(pattern):
        # Count matches without deleting anything.
        return sum(1 for _ in _redis_cli.scan_iter(match=pattern))

    def delete_cache_pattern(pattern):
        print('Removendo do cache as chaves com pattern: %s' % pattern)
        matches = _redis_cli.scan_iter(match=pattern)
        removed = _redis_cli.delete(*matches)
        print('%s chaves removidas do cache' % removed)

    def delete_if_any():
        if count_key_pattern(pattern) > 0:
            delete_cache_pattern(pattern)
        else:
            print('Não foi encontrada nenhuma chave pelo pattern: %s' % pattern)

    if not pattern:
        print('Não é possível buscar chaves se o pattern é vazio!')
        print('O cache permance sem mudanças!')
        return

    if force_clear:
        delete_if_any()
        return

    # Ask for confirmation, re-prompting until a valid answer is given.
    while True:
        answer = input('Tem certeza que deseja limpar o cache filtrando pelo pattern: %s? [y/N]: ' % pattern).strip().lower()
        if answer == 'y':
            delete_if_any()
            break
        if answer == 'n':
            print('O cache permance sem mudanças!')
            break
        print('Resposta inválida. Responda "y" ou "n" (sem aspas)')
@manager.command
@manager.option('-f', '--force', dest='force_delete', default=False)
def reset_dbsql(force_delete=False):
    """
    Drop all data from the SQL database.

    By default an existing database is left untouched; pass --force=True
    to wipe it. Once wiped, all tables are recreated empty.
    """
    db_path = app.config['DATABASE_PATH']
    if os.path.exists(db_path) and not force_delete:
        print('O banco já existe (em %s).' % db_path)
        print('remova este arquivo manualmente ou utilize --force.')
        return

    reset_db()
    print('O banco esta limpo!')
    print('Para criar um novo usuário execute o comando: create_superuser')
    print('python manager.py create_superuser')
@manager.command
def create_tables_dbsql(force_delete=False):
    """
    Create the tables required in the SQL database.

    Does nothing when the database file already exists; use reset_dbsql
    to wipe and recreate it.
    """
    db_path = app.config['DATABASE_PATH']
    if os.path.exists(db_path):
        print('O banco já existe (em %s).' % db_path)
        print('Para remover e crias as tabelas use o camando:')
        print('python manager.py reset_dbsql --help')
    else:
        create_db_tables()
        print('As tabelas foram criadas com sucesso!')
@manager.command
def create_superuser():
    """
    Create a new user from data entered on the command line.

    Prompts for:
    - email (must be valid and unique; re-prompts otherwise);
    - password (echo disabled);
    - whether the email is confirmed (if not, it must be verified by email
      before login).
    """
    from getpass import getpass  # stdlib; echo-safe password prompt

    user_email = None
    while user_email is None:
        user_email = input('Email: ').strip()
        if user_email == '':
            user_email = None
            print('Email não pode ser vazio')
        else:
            form = EmailForm(data={'email': user_email})
            if not form.validate():
                user_email = None
                print('Deve inserir um email válido!')
            elif controllers.get_user_by_email(user_email):
                user_email = None
                print('Já existe outro usuário com esse email!')

    # getpass disables echo portably and always restores the terminal state,
    # unlike the previous os.system("stty -echo") approach, which left echo
    # disabled if an exception occurred while reading the password.
    user_password = None
    while user_password is None:
        user_password = getpass('Senha: ').strip()
        if user_password == '':
            user_password = None
            print('Senha não pode ser vazio')

    email_confirmed = input('\nEmail confirmado? [y/N]: ').strip()
    if email_confirmed.upper() in ('Y', 'YES'):
        email_confirmed = True
    else:
        email_confirmed = False
        print('Deve enviar o email de confirmação pelo admin')

    # Create the user record.
    create_user(user_email, user_password, email_confirmed)
    print('Novo usuário criado com sucesso!')
@manager.command
@manager.option('-p', '--pattern', dest='pattern')
@manager.option('-f', '--failfast', dest='failfast')
def test(pattern=None, failfast=False):
    """Run the unit tests.

    Remember to set OPAC_CONFIG to the path of the testing config file
    before running this command:
    > export OPAC_CONFIG="/foo/bar/config.testing" && python manager.py test
    Use -p to run specific tests, e.g.:
    export OPAC_CONFIG="config/templates/testing.template" && python opac/manager.py test -p "test_main_views"
    """
    failfast = bool(failfast)

    # Re-exec ourselves with FLASK_COVERAGE set so coverage starts before
    # the webapp imports at the top of this module (see bootstrap above).
    if COV and not FLASK_COVERAGE:
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)

    loader = unittest.TestLoader()
    if pattern is None:
        suite = loader.discover('tests')
    else:
        suite = loader.loadTestsFromName('tests.' + pattern)

    result = unittest.TextTestRunner(verbosity=2, failfast=failfast).run(suite)

    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        COV.erase()

    # Exit status mirrors the test result (0 on success, 1 on failure).
    return sys.exit(0 if result.wasSuccessful() else 1)
@manager.command
@manager.option('-d', '--directory', dest="directory")
def upload_images(directory='.'):
    """
    Bulk-register every image found under --directory (use an absolute path)
    whose extension matches app.config['IMAGES_ALLOWED_EXTENSIONS_RE'].
    """
    # Fix: the option previously declared dest="pattern", which does not match
    # the function's "directory" parameter and made the -d flag unusable.
    extensions = app.config['IMAGES_ALLOWED_EXTENSIONS_RE']
    print("Coletando todas a imagens da pasta: %s" % directory)
    for root, dirnames, filenames in os.walk(directory):
        for extension in extensions:
            for filename in fnmatch.filter(filenames, extension):
                image_path = os.path.join(root, filename)
                create_image(image_path, filename)
@manager.command
@manager.option('-d', '--domain', dest="domain")
@manager.option('-f', '--filename', dest="filename")
def populate_database(domain="http://127.0.0.1", filename="fixtures/default_info.json"):
    """
    Register a collection's metadata from a JSON file
    (fixtures/default_info.json; defaults to the SciELO Brasil collection).
    Images are taken from the fixtures/imgs folder.
    """
    data = json.load(open(filename))
    collection = Collection.objects.first()
    if not collection:
        print("Nenhuma coleção encontrada!")
        return

    collection_data = data['collection']
    collection.name = collection_data['name']
    collection.address1 = collection_data['address1']
    collection.address2 = collection_data['address2']

    print("Cadastrando as imagens da coleção %s" % collection.name)
    for image_group in collection_data['images']:
        # Each entry maps a collection attribute name to an image path.
        for attr_name, image_path in image_group.items():
            image = create_image(image_path, os.path.basename(image_path))
            setattr(collection, attr_name, '%s%s' % (domain, image.get_absolute_url))

    print("Cadastrando os financiadores da coleção %s" % collection.name)
    sponsors = []
    for sponsor_data in data['sponsors']:
        sponsor = Sponsor()
        sponsor._id = str(uuid4().hex)
        sponsor.order = sponsor_data['order']
        sponsor.name = sponsor_data['name']
        logo = create_image(sponsor_data['logo_path'], os.path.basename(sponsor_data['logo_path']))
        sponsor.logo_url = '%s%s' % (domain, logo.get_absolute_url)
        sponsor.url = sponsor_data['url']
        sponsor.save()
        sponsors.append(sponsor)

    collection.sponsors = sponsors
    collection.save()
@manager.command
@manager.option('-f', '--filename', dest="filename")
def populate_pdfs_path_html_version(filename="fixtures/pdfs_path_file_html_version.json"):
    """
    Enrich article records with the previous site's URL path for their PDFs.

    Besides the PDF file names, the site needs the PDF paths so that URLs
    such as http://www.scielo.br/pdf/aa/v36n2/v36n2a09.pdf can be resolved.

    ``fixtures/pdfs_path_file_html_version.json`` is extracted once and holds
    all HTML-version PIDs, PDF paths and languages. File structure:

    [
        {
            "pid": "S0044-59672004000100001",
            "file_path": "/pdf/aa/v34n1/v34n1a01.pdf",
            "lang": "pt"
        },
    ]
    """
    with open(filename) as fp:
        data_json = json.load(fp)

    for art_pdf_path in data_json:
        art = controllers.get_article_by_pid(art_pdf_path['pid'])
        # Fix: guard against a missing article. The previous code assumed
        # get_article_by_pid always returned an object and crashed with
        # AttributeError when the PID was absent from the database.
        if art is not None and art.pdfs:
            for pdf in art.pdfs:
                if art_pdf_path.get('lang', '') == pdf.get('lang'):
                    pdf['file_path'] = art_pdf_path.get('file_path')
            art.save()
            print("PDF do PID: %s atualizado com sucesso, caminho %s" % (art_pdf_path.get('pid'), art_pdf_path.get('file_path')))
        else:
            print("PDF do PID: %s não encontrado na base de dados do OPAC." % (art_pdf_path.get('pid')))
@manager.command
def populate_journal_pages():
    """
    First-time registration of the journals' secondary pages located in
    /data/pages. Every run creates a new record.

    SciELO journal pages follow this structure:
    - eaboutj.htm
    - einstruc.htm
    - eedboard.htm
    - esubscrp.htm (subscription)
    Prefix "e" means Spanish, "i" English and "p" Portuguese.
    Note: the pages use the .htm extension.
    Subscription pages are not imported, as discussed in:
    https://github.com/scieloorg/opac/issues/630
    """
    acronyms = sorted(journal.acronym for journal in Journal.objects.all())
    total = len(acronyms)
    done = 0
    for index, acronym in enumerate(acronyms, start=1):
        print('{}/{} {}'.format(index, total, acronym))
        for lang, files in PAGE_NAMES_BY_LANG.items():
            create_new_journal_page(acronym, files, lang)
        done += 1
    print('Páginas: {}\nPeriódicos: {}'.format(done, total))
@manager.command
@manager.option('-c', '--cronstr', dest='cron_string')
def setup_scheduler_tasks(cron_string=None):
    """
    Register the daily audit-log mailing task on the scheduler.

    Uses --cronstr when given, otherwise MAILING_CRON_STRING from the app
    config; aborts with exit status 1 when neither is set.
    """
    cron_string = cron_string or app.config['MAILING_CRON_STRING']
    if not cron_string:
        # Fixed typo in the user-facing message: "Definit" -> "Definir".
        print('Valor de cron nulo para o scheduler. Definir cron pelo parâmetro ou pela var env.')
        return sys.exit(1)

    queue_name = 'mailing'
    clear_scheduler(queue_name)
    setup_scheduler(send_audit_log_daily_report, queue_name, cron_string)
@manager.command
def clear_scheduler_tasks():
    """Remove every scheduled job from the 'mailing' queue."""
    mailing_queue = 'mailing'
    clear_scheduler(queue_name=mailing_queue)
@manager.command
def send_audit_log_emails():
    """Collect today's audit-log changes and send the daily report email."""
    notifications_enabled = app.config['AUDIT_LOG_NOTIFICATION_ENABLED']
    extra_recipients = app.config['AUDIT_LOG_NOTIFICATION_RECIPIENTS']
    print('coletando registros de auditoria modificados hoje!')
    print('envio de notificações habilitado? (AUDIT_LOG_NOTIFICATION_ENABLED): ',
          notifications_enabled)
    print('lista recipients (além dos usuários) AUDIT_LOG_NOTIFICATION_RECIPIENTS: ',
          extra_recipients)
    send_audit_log_daily_report()
# Entry point: dispatch to the Flask-Script command given on the CLI.
if __name__ == '__main__':
    manager.run()
| {
"content_hash": "536fd22bdc7319cb99641a1ec7bf4cb1",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 162,
"avg_line_length": 33.87837837837838,
"alnum_prop": 0.6294375747905864,
"repo_name": "scieloorg/opac",
"id": "54cc21106798ec8e18a5bdb8a2901f5e904a18d0",
"size": "15172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opac/manager.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "28018"
},
{
"name": "Dockerfile",
"bytes": "1669"
},
{
"name": "HTML",
"bytes": "250667"
},
{
"name": "JavaScript",
"bytes": "846068"
},
{
"name": "Less",
"bytes": "142376"
},
{
"name": "Makefile",
"bytes": "7235"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "888421"
},
{
"name": "Shell",
"bytes": "567"
}
],
"symlink_target": ""
} |
from maps.models import UserProfilePicture, UserBio, Adventure, Map , MapSegment, WayPoint, DayNote
from maps.serealizers import UserProfilePictureSerializer, UserBioSerializer, AdventureSerializer, MapSerializer, MapSegmentSerializer
from maps.forms import ProfilePhotoUploadForm
from django.http import JsonResponse
from collections import OrderedDict
from django.contrib.auth.models import User
###REST
#from django.shortcuts import render
#from django.http import HttpResponse
#from django.http import HttpRequest
#from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from django.views.decorators.csrf import csrf_exempt
from django_maps import settings
from datetime import datetime
import pytz
import os
@csrf_exempt
def userInfo(request, userId=None):
    """GET: return a user's adventures, bio and profile pictures as JSON.

    POST: create or update the user's bio (no PUT; POST upserts).
    """
    if request.method == 'GET':
        adventures = Adventure.objects.filter(owner_id=userId)
        advSerializer = AdventureSerializer(adventures, many=True)

        bio = UserBio.objects.filter(user=userId).first()
        bioSerializer = None
        # Fix: idiomatic None check instead of `type(bio) != type(None)`.
        if bio is not None:
            bioSerializer = UserBioSerializer(bio, many=False).data

        userPicture = UserProfilePicture.objects.filter(user=userId)
        userPicData = None
        # NOTE(review): a queryset is never None, so this guard always passes;
        # kept only to preserve the original control flow.
        if userPicture is not None:
            userPicData = UserProfilePictureSerializer(userPicture, many=True).data

        total = {"adventures": advSerializer.data, "bio": bioSerializer, "profile_pictures": userPicData}
        return JsonResponse(total, safe=False)
    elif request.method == 'POST':  # NO PUT, only POST
        data = JSONParser().parse(request)
        user = User.objects.get(pk=int(data["userId"]))
        # Update the bio when one already exists, otherwise create it.
        bioQuery = UserBio.objects.filter(user=user)
        if bioQuery.exists():
            bioQuery.update(bio=data["bio"])
            bio = bioQuery.first()
        else:
            bio = UserBio(user=user, bio=data["bio"])
            bio.save()
        serialized = UserBioSerializer(bio)
        return JsonResponse(serialized.data, safe=False)
def handle_uploaded_file(userId, f):
    """Persist an uploaded profile picture and register it in the DB.

    The file is first written as uploaded, a DB row is created, and the
    file is then renamed to "<picture id>.png" (actual conversion/resizing
    is still a TODO; the rename is a temporary solution).
    """
    pictures_dir = settings.USER_MEDIA_ROOT + '/' + str(userId) + '/profile_pictures/'
    target = pictures_dir + f.name
    with open(target, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)

    # Register the picture in the database.
    owner = User.objects.get(pk=int(userId))
    upload_time = datetime.now(pytz.timezone('US/Pacific'))
    profilePicture = UserProfilePicture(user=owner, uploadTime=upload_time, active=True)
    profilePicture.save()

    # Temporary: rename to the picture id; conversion to the target format
    # (and removal of the original) is still pending.
    newName = pictures_dir + str(profilePicture.id) + ".png"
    os.rename(target, newName)
    return profilePicture
@csrf_exempt
def profilePhoto(request):
    """POST a profile photo (multipart form with userId + file).

    Returns {"picId": <id>} on success. Fix: the previous code implicitly
    returned None (an HTTP 500 in Django) for invalid forms and non-POST
    requests; those now get explicit JSON error responses.
    """
    if request.method != 'POST':
        return JsonResponse({"error": "method not allowed"}, status=405)

    form = ProfilePhotoUploadForm(request.POST, request.FILES)
    if not form.is_valid():
        return JsonResponse({"error": "invalid form"}, status=400)

    userId = form.data['userId']
    f = request.FILES['file']
    userPic = handle_uploaded_file(userId, f)
    return JsonResponse({"picId": userPic.id}, safe=False)
@csrf_exempt
def adventures(request, advId=None):
    """Create (POST), delete (DELETE) or replace (PUT) an Adventure."""
    if request.method == 'POST':
        payload = JSONParser().parse(request)
        owner = User.objects.get(pk=int(payload["owner"]))
        # If advStatus = active, need to unset previous active.
        adv = Adventure(
            name=payload["name"],
            owner=owner,
            advType=payload["advType"],
            advStatus=payload["advStatus"],
        )
        adv.save()
        return JsonResponse(AdventureSerializer(adv).data, safe=False)
    elif request.method == "DELETE":
        advToDel = Adventure.objects.get(pk=advId)
        advToDel.delete()
        # TODO Probably should return success code instead of object...
        return JsonResponse(AdventureSerializer(advToDel).data, safe=False)
    elif request.method == "PUT":
        payload = JSONParser().parse(request)
        owner = User.objects.get(pk=int(payload["owner"]))
        # If advStatus = active, need to unset previous active.
        adv = Adventure(
            id=advId,
            name=payload["name"],
            owner=owner,
            advType=payload["advType"],
            advStatus=payload["advStatus"],
        )
        adv.save()
        return JsonResponse(AdventureSerializer(adv).data, safe=False)
@csrf_exempt
def advsOverview(request,userId):
    """This returns all start and end points from all the segments in all the maps, for all adventures.
    The goal is to visualize roughly all the travelling the user has done."""
    # NOTE(review): non-GET requests fall through and return None — confirm
    # whether other methods should be rejected explicitly.
    if request.method=="GET":
        allAdvs = []
        # this is awful
        advs = Adventure.objects.filter(owner_id=userId).all()
        for adv in advs:
            advCoordinates = []  # simplified polyline: only segment endpoints
            distance = 0         # summed over all segments of the adventure
            startTime = None
            endTime = None
            # get startTime: first segment of the first map (when present)
            advMaps = adv.maps.all()
            if advMaps.count()>0:
                startSegments = advMaps[0].segments.all()
                if startSegments.count()>0:
                    startTime = startSegments[0].startTime
                # endTime: last segment of the last map (when present)
                endSegments = advMaps[advMaps.count()-1].segments.all()
                if endSegments.count()>0:
                    endTime = endSegments[endSegments.count()-1].endTime
            for advMap in advMaps:
                segments = advMap.segments.all()
                for segment in segments:
                    start = segment.coordinates.first()
                    startPoint = [float(start.lat),float(start.lng)]
                    end = segment.coordinates.last()
                    endPoint = [float(end.lat),float(end.lng)]
                    ###TODO: allow for non-continuous lines?
                    #Add first segment
                    if len(advCoordinates) == 0:
                        advCoordinates.append(startPoint)
                        advCoordinates.append(endPoint)
                    #If this is not the first segment, check if startPoint is same as last endPoint
                    else:
                        # Skip the duplicate start point when segments are
                        # contiguous so the LineString stays continuous.
                        if advCoordinates[len(advCoordinates)-1]==startPoint:
                            advCoordinates.append(endPoint)
                        else:
                            advCoordinates.append(startPoint)
                            advCoordinates.append(endPoint)
                    distance += segment.distance
            # One GeoJSON Feature (LineString) per adventure.
            advGeoJson = {'type':'Feature',
                          'properties':{'advId':adv.id,
                                        'distance': distance,
                                        'startTime': startTime,
                                        'endTime': endTime},
                          'geometry':{'type':'LineString',
                                      'coordinates': advCoordinates}}
            allAdvs.append(advGeoJson)
        adventuresGeoJson = {'type':'FeatureCollection','properties':{'userId':userId},'features': allAdvs}
        return JsonResponse(adventuresGeoJson, safe=False)
def makeGeoJsonFromMap(map):
    """Build a GeoJSON FeatureCollection covering every segment of *map*.

    Only the first day-note of each segment is included here (unlike
    makeGeoJsonFromSegment, which returns all of them).
    """
    features = []
    for segment in map.segments.all():
        points = [[float(coord.lat), float(coord.lng)]
                  for coord in segment.coordinates.all()]
        firstNote = segment.dayNotes.first()
        notes = [] if firstNote is None else [firstNote.note]
        features.append({
            "type": "Feature",
            "properties": {
                "segmentId": segment.id,
                "distance": segment.distance,
                "startTime": segment.startTime,
                "endTime": segment.endTime,
                "delay": segment.delay,
                "notes": notes,
            },
            "geometry": {"type": "LineString", "coordinates": points},
        })
    return {
        "type": "FeatureCollection",
        "properties": {"mapId": map.id, "mapName": map.name},
        "features": features,
    }
#TODO : Use makeGeoJsonFromSegment inside makeGeoJsonFromMap...
def makeGeoJsonFromSegment(segment):
    """Build a single GeoJSON Feature for *segment*, including every day-note."""
    points = [[float(coord.lat), float(coord.lng)]
              for coord in segment.coordinates.all()]
    notes = [noteObj.note for noteObj in segment.dayNotes.all()]
    return {
        "type": "Feature",
        "properties": {
            "segmentId": segment.id,
            "distance": segment.distance,
            "delay": segment.delay,
            "notes": notes,
            "startTime": segment.startTime,
            "endTime": segment.endTime,
        },
        "geometry": {"type": "LineString", "coordinates": points},
    }
@csrf_exempt
def advMaps(request, advId=None):
    """GET: list an adventure's maps; POST: create a new map for it."""
    if request.method == 'GET':
        maps_for_adv = Map.objects.filter(adv=advId)
        results = [
            {"id": m.id, "name": m.name, "distance": m.total_distance()}
            for m in maps_for_adv.all()
        ]
        return JsonResponse(results, safe=False)
    # TODO This should move to /api/rest/maps
    if request.method == 'POST':
        payload = JSONParser().parse(request)
        adventure = Adventure.objects.get(id=int(payload["advId"]))
        newMap = Map(name=payload["name"], adv=adventure)
        newMap.save()
        # Hmm, maybe I should just get a serializer...
        result = {"id": newMap.id, "name": newMap.name, "features": [], "distance": 0}
        return JsonResponse(result, safe=False)
@csrf_exempt
def maps(request, mapId=None):
    """GET: return a map as custom GeoJSON; DELETE: remove the map."""
    if request.method == 'GET':
        # Local renamed from `map` to avoid shadowing the builtin.
        theMap = Map.objects.filter(id=mapId).first()
        results = makeGeoJsonFromMap(theMap) if theMap is not None else []
        return JsonResponse(results, safe=False)
    elif request.method == 'DELETE':
        mapToDel = Map.objects.get(id=mapId)
        mapToDel.delete()
        return JsonResponse(MapSerializer(mapToDel).data, safe=False)
@csrf_exempt
def mapSegment(request, segmentId=None):
    """POST: create a MapSegment with its waypoints and optional day-note."""
    if request.method == 'POST':
        payload = JSONParser().parse(request)
        # Try validation with serializers...
        if payload.get("mapId") is None:
            return JsonResponse({"error": "Bad input"})

        parentMap = Map.objects.get(id=int(payload["mapId"]))
        # Read every field before saving anything, mirroring the original
        # order (a missing required key fails before any row is created).
        startTime = payload.get("startTime")
        endTime = payload.get("endTime")
        distance = payload["distance"]
        waypoints = payload["waypoints"]
        noteText = payload.get('dayNotes')
        delay = payload['delay']

        segment = MapSegment(
            map=parentMap,
            startTime=startTime,
            endTime=endTime,
            distance=distance,
            delay=delay,
        )
        segment.save()

        if noteText:
            DayNote(segment=segment, note=noteText).save()

        # Create the waypoints (payload points are [lng, lat] pairs).
        for point in waypoints:
            WayPoint(segment=segment, lat=point[1], lng=point[0]).save()

        # Return the segment as custom geoJson.
        return JsonResponse(makeGeoJsonFromSegment(segment), safe=False)
| {
"content_hash": "dbeadc33ccb6dd243f394912f1fe811b",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 134,
"avg_line_length": 35.53370786516854,
"alnum_prop": 0.5609486166007905,
"repo_name": "agilman/django-maps",
"id": "6f3681738dc9484e3f559ff4f1d759eb5d0eb55a",
"size": "12650",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "maps/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113359"
},
{
"name": "HTML",
"bytes": "34272"
},
{
"name": "JavaScript",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "9574"
}
],
"symlink_target": ""
} |
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non-matching) lists, each preserving input order.
    """
    matched: List[Any] = []
    unmatched: List[Any] = []
    for element in iterator:
        (matched if predicate(element) else unmatched).append(element)
    # Returns trueList, falseList
    return matched, unmatched
class channelCallTransformer(cst.CSTTransformer):
    """libcst transformer that folds flattened API-call arguments into a
    single keyword ``request`` dict.

    For every method named in METHOD_TO_PARAMS, positional and keyword
    arguments are collected into ``request={...}``; the control-plane
    parameters (retry/timeout/metadata) are re-attached as keyword
    arguments after it.
    """
    # Parameters passed through unchanged instead of being folded into
    # the `request` dict.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps an API method name to the ordered tuple of keyword names its
    # positional arguments correspond to.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'activate_entitlement': ('name', 'request_id', ),
        'cancel_entitlement': ('name', 'request_id', ),
        'change_offer': ('name', 'offer', 'parameters', 'purchase_order_id', 'request_id', ),
        'change_parameters': ('name', 'parameters', 'request_id', 'purchase_order_id', ),
        'change_renewal_settings': ('name', 'renewal_settings', 'request_id', ),
        'check_cloud_identity_accounts_exist': ('parent', 'domain', ),
        'create_channel_partner_link': ('parent', 'channel_partner_link', ),
        'create_channel_partner_repricing_config': ('parent', 'channel_partner_repricing_config', ),
        'create_customer': ('parent', 'customer', ),
        'create_customer_repricing_config': ('parent', 'customer_repricing_config', ),
        'create_entitlement': ('parent', 'entitlement', 'request_id', ),
        'delete_channel_partner_repricing_config': ('name', ),
        'delete_customer': ('name', ),
        'delete_customer_repricing_config': ('name', ),
        'fetch_report_results': ('report_job', 'page_size', 'page_token', ),
        'get_channel_partner_link': ('name', 'view', ),
        'get_channel_partner_repricing_config': ('name', ),
        'get_customer': ('name', ),
        'get_customer_repricing_config': ('name', ),
        'get_entitlement': ('name', ),
        'import_customer': ('domain', 'cloud_identity_id', 'parent', 'overwrite_if_exists', 'auth_token', 'channel_partner_id', 'customer', ),
        'list_channel_partner_links': ('parent', 'page_size', 'page_token', 'view', ),
        'list_channel_partner_repricing_configs': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_customer_repricing_configs': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_customers': ('parent', 'page_size', 'page_token', 'filter', ),
        'list_entitlements': ('parent', 'page_size', 'page_token', ),
        'list_offers': ('parent', 'page_size', 'page_token', 'filter', 'language_code', ),
        'list_products': ('account', 'page_size', 'page_token', 'language_code', ),
        'list_purchasable_offers': ('customer', 'create_entitlement_purchase', 'change_offer_purchase', 'page_size', 'page_token', 'language_code', ),
        'list_purchasable_skus': ('customer', 'create_entitlement_purchase', 'change_offer_purchase', 'page_size', 'page_token', 'language_code', ),
        'list_reports': ('parent', 'page_size', 'page_token', 'language_code', ),
        'list_skus': ('parent', 'account', 'page_size', 'page_token', 'language_code', ),
        'list_subscribers': ('account', 'page_size', 'page_token', ),
        'list_transferable_offers': ('parent', 'sku', 'cloud_identity_id', 'customer_name', 'page_size', 'page_token', 'language_code', ),
        'list_transferable_skus': ('parent', 'cloud_identity_id', 'customer_name', 'page_size', 'page_token', 'auth_token', 'language_code', ),
        'lookup_offer': ('entitlement', ),
        'provision_cloud_identity': ('customer', 'cloud_identity_info', 'user', 'validate_only', ),
        'register_subscriber': ('account', 'service_account', ),
        'run_report_job': ('name', 'date_range', 'filter', 'language_code', ),
        'start_paid_service': ('name', 'request_id', ),
        'suspend_entitlement': ('name', 'request_id', ),
        'transfer_entitlements': ('parent', 'entitlements', 'auth_token', 'request_id', ),
        'transfer_entitlements_to_google': ('parent', 'entitlements', 'request_id', ),
        'unregister_subscriber': ('account', 'service_account', ),
        'update_channel_partner_link': ('name', 'channel_partner_link', 'update_mask', ),
        'update_channel_partner_repricing_config': ('channel_partner_repricing_config', ),
        'update_customer': ('customer', 'update_mask', ),
        'update_customer_repricing_config': ('customer_repricing_config', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a recognized API method call in-place, returning the
        (possibly unchanged) replacement node.
        """
        try:
            # Only attribute-style calls (obj.method(...)) have .func.attr.
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate control-plane keywords (retry/timeout/metadata) from
        # request-payload keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the request parameters must be the
        # control parameters, in declaration order; rebuild them as kwargs.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=channelCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Walk the input tree, handling each python source as it is found.
    for root, _, files in os.walk(in_dir):
        for filename in files:
            if os.path.splitext(filename)[1] != ".py":
                continue
            source_path = pathlib.Path(root) / filename
            old_code = source_path.read_text()

            # Parse the code and insert method call fixes.
            new_tree = cst.parse_module(old_code).visit(transformer)

            # Mirror the directory structure of the source under out_dir.
            dest_path = out_dir.joinpath(source_path.relative_to(in_dir))
            dest_path.parent.mkdir(parents=True, exist_ok=True)

            # Generate the updated source file at the corresponding path.
            dest_path.write_text(new_tree.code)
if __name__ == '__main__':
    # Command-line entry point: validate both directories, then delegate
    # to fix_files().
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the channel client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Fail fast on any precondition violation (exit status -1 -> 255).
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # Refuse to clobber existing output: the output directory must be empty.
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| {
"content_hash": "087de5a296c791ec877032cbea94f423",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 150,
"avg_line_length": 44.106280193236714,
"alnum_prop": 0.6028477546549835,
"repo_name": "googleapis/python-channel",
"id": "643b44ce3e775e4e865a34df31cb30ab5f5a0a81",
"size": "9754",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/fixup_channel_v1_keywords.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1755858"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from py2neo.core import Graph, Node, Relationship, Path, PropertySet, LabelSet
from py2neo.batch.core import Batch, Job, CypherJob, Target
from py2neo.batch.push import PushNodeLabelsJob, PushPropertiesJob, PushPropertyJob
from py2neo.cypher.util import StartOrMatch
# Public surface of this module: one job class per write operation, plus
# the batch container that aggregates them.
__all__ = ["CreateNodeJob", "CreateRelationshipJob", "CreatePathJob", "CreateUniquePathJob",
           "DeleteEntityJob", "DeletePropertyJob", "DeletePropertiesJob",
           "AddNodeLabelsJob", "RemoveNodeLabelJob", "WriteBatch"]
def _create_query(graph, p, unique=False):
    """Build a Cypher ``CREATE`` (or ``CREATE UNIQUE``) statement for path *p*.

    :param graph: graph used for Cypher syntax (START vs MATCH) detection
    :param p: the :class:`Path` to create
    :param unique: if true, emit ``CREATE UNIQUE`` instead of ``CREATE``
    :return: ``(query, params)`` tuple ready to run as a Cypher job
    """
    start_or_match_clause = StartOrMatch(graph)
    # `path` accumulates the Cypher pattern fragments, `values` the
    # identifiers introduced, `params` the query parameter map.
    path, values, params = [], [], {}

    def append_node(i, node):
        # A node may be None (anonymous), bound (already exists in the
        # graph, so match it by id) or unbound (create inline with its
        # properties passed as a parameter).
        if node is None:
            path.append("(n{0})".format(i))
            values.append("n{0}".format(i))
        elif node.bound:
            path.append("(n{0})".format(i))
            start_or_match_clause.node("n%s" % i, "{i%s}" % i)
            params["i{0}".format(i)] = node._id
            values.append("n{0}".format(i))
        else:
            path.append("(n{0} {{p{0}}})".format(i))
            params["p{0}".format(i)] = node.properties
            values.append("n{0}".format(i))

    def append_rel(i, rel):
        # Relationship properties, when present, travel as a separate
        # parameter map (q<i>).
        if rel.properties:
            path.append("-[r{0}:`{1}` {{q{0}}}]->".format(i, rel.type))
            params["q{0}".format(i)] = PropertySet(rel.properties)
            values.append("r{0}".format(i))
        else:
            path.append("-[r{0}:`{1}`]->".format(i, rel.type))
            values.append("r{0}".format(i))

    # Interleave nodes and relationships in path order.
    append_node(0, p.nodes[0])
    for i, rel in enumerate(p.rels):
        append_rel(i, rel)
        append_node(i + 1, p.nodes[i + 1])
    clauses = []
    if start_or_match_clause:
        clauses.append(start_or_match_clause.string)
    if unique:
        clauses.append("CREATE UNIQUE p={0}".format("".join(path)))
    else:
        clauses.append("CREATE p={0}".format("".join(path)))
    clauses.append("RETURN p")
    query = "\n".join(clauses)
    return query, params
class CreateNodeJob(Job):
    """Batch job that POSTs a new node with the given properties."""

    # All node creation requests share the same "node" endpoint.
    target = Target("node")

    def __init__(self, **properties):
        Job.__init__(self, "POST", self.target, properties)
class CreateRelationshipJob(Job):
    """Batch job that POSTs a new relationship between two nodes."""

    def __init__(self, start_node, rel, end_node, **properties):
        # The payload names the relationship type and end-node URI;
        # explicit keyword properties override those already on `rel`.
        body = {"type": rel.type, "to": Target(end_node).uri_string}
        if rel.properties or properties:
            body["data"] = dict(rel.properties, **properties)
        Job.__init__(self, "POST", Target(start_node, "relationships"), body)
class CreatePathJob(CypherJob):
    """Batch job that creates a whole path via a single Cypher CREATE."""

    def __init__(self, *entities):
        # Fudge to allow graph to be passed in so Cypher syntax
        # detection can occur. Can be removed when only 2.0+ is
        # supported.
        if isinstance(entities[0], Graph):
            self.graph, entities = entities[0], entities[1:]
        CypherJob.__init__(self, *_create_query(self.graph, Path(*entities)))
class CreateUniquePathJob(CypherJob):
    """Batch job that creates a path via Cypher CREATE UNIQUE, adding
    only the parts that do not already exist.
    """

    def __init__(self, *entities):
        # Fudge to allow graph to be passed in so Cypher syntax
        # detection can occur. Can be removed when only 2.0+ is
        # supported.
        if isinstance(entities[0], Graph):
            self.graph, entities = entities[0], entities[1:]
        CypherJob.__init__(self, *_create_query(self.graph, Path(*entities), unique=True))
class DeleteEntityJob(Job):
    """Batch job that DELETEs a node or relationship."""

    def __init__(self, entity):
        Job.__init__(self, "DELETE", Target(entity))
class DeletePropertyJob(Job):
    """Batch job that DELETEs a single property from an entity."""

    def __init__(self, entity, key):
        Job.__init__(self, "DELETE", Target(entity, "properties", key))
class DeletePropertiesJob(Job):
    """Batch job that DELETEs every property from an entity."""

    def __init__(self, entity):
        Job.__init__(self, "DELETE", Target(entity, "properties"))
class AddNodeLabelsJob(Job):
    """Batch job that POSTs additional labels onto a node."""

    def __init__(self, node, *labels):
        # LabelSet normalises/deduplicates the labels before sending.
        Job.__init__(self, "POST", Target(node, "labels"), list(LabelSet(labels)))
class RemoveNodeLabelJob(Job):
    """Batch job that DELETEs one label from a node."""

    def __init__(self, entity, label):
        Job.__init__(self, "DELETE", Target(entity, "labels", label))
class WriteBatch(Batch):
    """A collection of data-write jobs executed against a graph in a
    single round trip.

    Most methods append one job to the batch and hand back a batch
    request object which later jobs in the same batch may use as a
    reference — see :py:meth:`create` for an example.
    """

    def __init__(self, graph):
        Batch.__init__(self, graph)

    def run(self):
        """Execute the batch, discarding all results."""
        self.graph.batch.run(self)

    def stream(self):
        """Execute the batch, yielding the content of each result as it
        becomes available.
        """
        for outcome in self.graph.batch.stream(self):
            yield outcome.content

    def submit(self):
        """Execute the batch and return the content of every result as a
        list.
        """
        collected = []
        for outcome in self.graph.batch.submit(self):
            collected.append(outcome.content)
        return collected

    def create(self, abstract):
        """Append a creation job for the node or relationship described
        by the given abstract entity. For example::

            batch = WriteBatch(graph)
            a = batch.create(node(name="Alice"))
            b = batch.create(node(name="Bob"))
            batch.create(rel(a, "KNOWS", b))
            results = batch.submit()

        :param abstract: node or relationship
        :type abstract: abstract
        :return: batch request object
        """
        entity = self.graph.cast(abstract)
        if isinstance(entity, Node):
            job = CreateNodeJob(**entity.properties)
        elif isinstance(entity, Relationship):
            # End nodes may themselves be references to earlier requests.
            job = CreateRelationshipJob(self.resolve(entity.start_node),
                                        entity.rel,
                                        self.resolve(entity.end_node))
        else:
            raise TypeError(entity)
        return self.append(job)

    def create_path(self, node, *rels_and_nodes):
        """Append a job that builds a path through the given nodes and
        relationships. Nodes may be existing concrete node instances,
        abstract nodes or :py:const:`None`; references to other requests
        are not supported.

        :param node: start node
        :type node: concrete, abstract or :py:const:`None`
        :param rels_and_nodes: alternating relationships and nodes
        :type rels_and_nodes: concrete, abstract or :py:const:`None`
        :return: batch request object
        """
        return self.append(CreatePathJob(self.graph, node, *rels_and_nodes))

    def get_or_create_path(self, node, *rels_and_nodes):
        """Append a job that builds a unique path through the given nodes
        and relationships, creating only those parts that are missing.
        Nodes may be existing concrete node instances, abstract nodes or
        :py:const:`None`; references to other requests are not supported.

        :param node: start node
        :type node: concrete, abstract or :py:const:`None`
        :param rels_and_nodes: alternating relationships and nodes
        :type rels_and_nodes: concrete, abstract or :py:const:`None`
        :return: batch request object
        """
        return self.append(
            CreateUniquePathJob(self.graph, node, *rels_and_nodes))

    def delete(self, entity):
        """Append a job that removes a node or relationship from the
        graph.

        :param entity: node or relationship to delete
        :type entity: concrete or reference
        :return: batch request object
        """
        return self.append(DeleteEntityJob(self.resolve(entity)))

    def set_property(self, entity, key, value):
        """Append a job that sets a single property on a node or
        relationship.

        :param entity: node or relationship on which to set property
        :type entity: concrete or reference
        :param key: property key
        :type key: :py:class:`str`
        :param value: property value
        :return: batch request object
        """
        return self.append(PushPropertyJob(self.resolve(entity), key, value))

    def set_properties(self, entity, properties):
        """Append a job that replaces every property on a node or
        relationship.

        :param entity: node or relationship on which to set properties
        :type entity: concrete or reference
        :param properties: properties
        :type properties: :py:class:`dict`
        :return: batch request object
        """
        return self.append(
            PushPropertiesJob(self.resolve(entity), properties))

    def delete_property(self, entity, key):
        """Append a job that removes a single property from a node or
        relationship.

        :param entity: node or relationship from which to delete property
        :type entity: concrete or reference
        :param key: property key
        :type key: :py:class:`str`
        :return: batch request object
        """
        return self.append(DeletePropertyJob(self.resolve(entity), key))

    def delete_properties(self, entity):
        """Append a job that removes every property from a node or
        relationship.

        :param entity: node or relationship from which to delete properties
        :type entity: concrete or reference
        :return: batch request object
        """
        return self.append(DeletePropertiesJob(self.resolve(entity)))

    def add_labels(self, node, *labels):
        """Append a job that adds the given labels to a node.

        :param node: node to which to add labels
        :type node: concrete or reference
        :param labels: text labels
        :type labels: :py:class:`str`
        :return: batch request object
        """
        return self.append(AddNodeLabelsJob(self.resolve(node), *labels))

    def remove_label(self, node, label):
        """Append a job that removes one label from a node.

        :param node: node from which to remove labels (can be a reference
            to another request within the same batch)
        :param label: text label
        :type label: :py:class:`str`
        :return: batch request object
        """
        return self.append(RemoveNodeLabelJob(self.resolve(node), label))

    def set_labels(self, node, *labels):
        """Append a job that replaces every label on a node.

        :param node: node on which to replace labels (can be a reference
            to another request within the same batch)
        :param labels: text labels
        :type labels: :py:class:`str`
        :return: batch request object
        """
        return self.append(PushNodeLabelsJob(self.resolve(node), labels))
| {
"content_hash": "196333eb1423a9edaa2bf8f8e19c4e3c",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 92,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.6226993865030674,
"repo_name": "nicolewhite/py2neo",
"id": "325fad57c2fa264f245333f9502eb28988c26769",
"size": "10738",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.0.8",
"path": "py2neo/batch/write.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3840"
},
{
"name": "Makefile",
"bytes": "6765"
},
{
"name": "Python",
"bytes": "879521"
},
{
"name": "Shell",
"bytes": "8124"
}
],
"symlink_target": ""
} |
from .Base import *
from .helper import isCssProp
class FEDUnknownCssNameChecker(RuleChecker):
    # NOTE(review): the docstring below appears to be machine-readable
    # JSON (summary/desc shown by the tool) rather than prose — presumably
    # parsed from __doc__ at runtime, so it must not be reformatted or
    # translated. TODO confirm against the plugin loader.
    '''{
    "summary":"错误的css属性",
    "desc":"本工具会帮您查找错误的CSS属性,如果写错了,即可收到错误提示"
    }'''
    # (Docstring translation: summary "incorrect CSS property"; desc
    # "this tool finds misspelled CSS properties and reports an error".)

    def __init__(self):
        # Unique rule identifier and severity level.
        self.id = 'unknown-css-prop'
        self.errorLevel = ERROR_LEVEL.ERROR
        # Message template; ${name}/${selector} are substituted by the
        # reporting framework — keep the placeholders intact.
        self.errorMsg = 'unknown attribute name "${name}" found in "${selector}"'

    def check(self, rule, config):
        # The rule passes (True) when the lower-cased property name is a
        # recognised CSS property; isCssProp comes from .helper.
        return isCssProp(rule.name.lower())
| {
"content_hash": "5486833bb304091a50aa403ecc7140d4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 81,
"avg_line_length": 27.41176470588235,
"alnum_prop": 0.6180257510729614,
"repo_name": "wangjeaf/CSSCheckStyle",
"id": "7e9f0109b48dbf1ed504dc8758c57a433bd9b714",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckstyle/plugins/FEDUnknownCssNameChecker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "373226"
},
{
"name": "Shell",
"bytes": "928"
},
{
"name": "VimL",
"bytes": "1871"
}
],
"symlink_target": ""
} |
from likelihood import Likelihood
from logistic import Logistic
from gaussian import Gaussian
from softmax import Softmax
from regression_network import RegressionNetwork
| {
"content_hash": "cc1e7cf320cf8e9e321480f485324854",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 34.2,
"alnum_prop": 0.8771929824561403,
"repo_name": "VirgiAgl/V_AutoGP",
"id": "aefe56f8872980dd9422f5487f9be8ac55bf310e",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autogp/likelihoods/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5965"
},
{
"name": "Jupyter Notebook",
"bytes": "98577"
},
{
"name": "Python",
"bytes": "101979"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class Account(models.Model):
    """A named account owned by a single user.

    ``balance`` is stored as a non-negative integer — presumably minor
    currency units; confirm with callers before relying on that.
    """
    name = models.CharField(max_length=200)
    slug = models.SlugField(max_length=250)
    description = models.TextField()
    # Explicit on_delete matches the implicit pre-Django-2.0 default
    # (CASCADE), so behavior is unchanged, and keeps the model valid on
    # Django >= 2.0 where the argument is mandatory.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    balance = models.PositiveIntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # Single-character status codes and their display labels.
    STATUS_OPTIONS = (
        ('A', 'Active'),
        ('I', 'Inactive'),
    )
    status = models.CharField(max_length=1, choices=STATUS_OPTIONS)

    @models.permalink
    def get_absolute_url(self):
        # Reverses the named URL pattern 'account_detail' for this account.
        return ('account_detail', (), {'slug': self.slug})

    def __unicode__(self):
        return self.name
| {
"content_hash": "cb84ceb28b9b1f0cf1405ead7ecf4b7b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 29.6,
"alnum_prop": 0.6567567567567567,
"repo_name": "Apreche/Open-Ducats",
"id": "526f18e13de01b82fce36feb3eb1bbafc814c07d",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13917"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django_irods.views import rest_check_task_status, rest_download, download
# URL routes for iRODS-backed downloads and REST task-status polling.
urlpatterns = [
    # for download request from resource landing page
    url(r'^download/(?P<path>.*)$', download, name='django_irods_download'),
    # for download request from REST API
    url(r'^rest_download/(?P<path>.*)$', rest_download,
        name='rest_download'),
    # for REST API poll
    # BUG FIX: the original class [A-z0-9\-] also matched the punctuation
    # characters between 'Z' and 'a' ('[', '\', ']', '^', '_', '`');
    # [A-Za-z0-9\-] matches only letters, digits and hyphens, which covers
    # the dash-separated hex task ids seen in practice.
    url(r'^rest_check_task_status/(?P<task_id>[A-Za-z0-9\-]+)$',
        rest_check_task_status,
        name='rest_check_task_status'),
]
| {
"content_hash": "e47d1321a2393e57a85b5fe345c71353",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.656934306569343,
"repo_name": "hydroshare/hydroshare",
"id": "9615dd9a41c977466d51096725109fd8f958649b",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_irods/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183727"
},
{
"name": "Dockerfile",
"bytes": "1433"
},
{
"name": "HTML",
"bytes": "950010"
},
{
"name": "JavaScript",
"bytes": "1450537"
},
{
"name": "Python",
"bytes": "5786593"
},
{
"name": "R",
"bytes": "4904"
},
{
"name": "Shell",
"bytes": "94173"
},
{
"name": "Vue",
"bytes": "32043"
}
],
"symlink_target": ""
} |
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""
import functools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test as test_lib
class ZeroFractionTest(test_lib.TestCase):
  """Tests for nn_impl.zero_fraction (fraction of zero-valued entries)."""

  def _ZeroFraction(self, x):
    # NumPy reference implementation: 1 - (#nonzeros / #elements).
    assert x.shape
    total_elements = np.prod(x.shape)
    nonzeros = np.count_nonzero(x.flatten())
    return 1.0 - nonzeros / total_elements

  @test_util.run_deprecated_v1
  def testZeroFraction(self):
    x_shape = [5, 17]
    # Random 0/1 tensor so the expected fraction is non-trivial.
    x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32)
    y_np = self._ZeroFraction(x_np)
    x_tf = constant_op.constant(x_np)
    x_tf.set_shape(x_shape)
    y_tf = nn_impl.zero_fraction(x_tf)
    y_tf_np = self.evaluate(y_tf)
    eps = 1e-8
    self.assertAllClose(y_tf_np, y_np, eps)

  @test_util.run_deprecated_v1
  def testZeroFractionEmpty(self):
    # An empty input has no well-defined fraction; the op returns NaN.
    x = np.zeros(0)
    y = self.evaluate(nn_impl.zero_fraction(x))
    self.assertTrue(np.isnan(y))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Zeros(self):
    # Inputs larger than 2**27 elements — presumably exercising a
    # large-count code path in the op; TODO confirm against the op impl.
    sparsity = nn_impl.zero_fraction(
        array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8))
    self.assertAllClose(1.0, self.evaluate(sparsity))

  @test_util.run_deprecated_v1
  def testZeroFraction2_27Ones(self):
    sparsity = nn_impl.zero_fraction(
        array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8))
    self.assertAllClose(0.0, self.evaluate(sparsity))

  @test_util.run_deprecated_v1
  def testUnknownSize(self):
    # Statically-unknown shape: the fraction must be computed at run time.
    value = array_ops.placeholder(dtype=dtypes.float32)
    sparsity = nn_impl.zero_fraction(value)
    with self.cached_session() as sess:
      self.assertAllClose(0.25,
                          sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]}))
@test_util.run_all_in_graph_and_eager_modes
class SoftmaxTest(test_lib.TestCase, parameterized.TestCase):
  """Tests for nn_ops.softmax_v2 against a NumPy reference."""

  def _softmax(self, x):
    # Numerically-stable NumPy reference: subtract the row max before exp.
    assert len(x.shape) == 2
    if x.shape[1] == 0:
      return x
    m = x.max(1)[:, np.newaxis]
    u = np.exp(x - m)
    z = u.sum(1)[:, np.newaxis]
    return u / z

  def testSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._softmax(x_np)
    x_tf = constant_op.constant(x_np)
    y_tf = nn_ops.softmax_v2(x_tf)
    # Axis 1 is the last dimension here, so this must match the default.
    y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1)
    y_tf_np = self.evaluate(y_tf)
    y_tf_last_dim_np = self.evaluate(y_tf_last_dim)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)
    self.assertAllClose(y_tf_last_dim_np, y_np, eps)

  def testSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    # For a rank-2 array, axis=-2 and axis=0 address the same dimension,
    # so all three results must agree.
    x_neg_axis = nn_ops.softmax_v2(arr, axis=-2)
    y_pos_axis = nn_ops.softmax_v2(arr, axis=0)
    z_gt_axis = nn_ops.softmax_v2(arr, axis=0)
    x_neg_axis_tf = self.evaluate(x_neg_axis)
    y_pos_axis_tf = self.evaluate(y_pos_axis)
    z_gt_axis_tf = self.evaluate(z_gt_axis)
    eps = 1e-3
    self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
    self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)

  def testSoftmaxExtendType(self):
    # bfloat16 softmax should agree with float32 softmax cast to bfloat16.
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    x_f32_tf = constant_op.constant(x_np)
    x_bf16_tf = math_ops.cast(x_f32_tf, dtypes.bfloat16)
    y_f32_tf = self.evaluate(nn_ops.softmax(x_f32_tf))
    y_bf16_tf = self.evaluate(nn_ops.softmax(x_bf16_tf))
    expected = math_ops.cast(y_f32_tf, dtypes.bfloat16)
    # Tolerance scales with the reduction width (x_shape[1]).
    tol = x_shape[1] * 1e-3
    self.assertAllClose(y_bf16_tf, expected, rtol=tol, atol=tol)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  def testGradient(self, x_shape):
    # Analytic vs numeric gradient check in float64.
    x_np = np.random.randn(*x_shape).astype(np.float64)
    x_tf = constant_op.constant(x_np)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        nn_ops.softmax_v2, [x_tf])
    self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
class LogPoissonLossTest(test_lib.TestCase):
  """Tests for nn_impl.log_poisson_loss against a NumPy reference."""

  def _log_poisson_loss(self, x, z, compute_full_loss=False):
    # NumPy reference: exp(x) - z*x, optionally plus the Stirling
    # approximation term, which is only applied for targets z > 1.
    lpl = np.exp(x) - z * x
    if compute_full_loss:
      stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
      lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
    return lpl

  def testLogPoissonLoss(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    # Integer-valued targets in [0, 5).
    z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)
    y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)
    y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)
    y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)
    y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True)
    y_tf_np = self.evaluate(y_tf)
    y_tf_np_stirling = self.evaluate(y_tf_stirling)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)
    self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)

  def testGradient(self):
    # Analytic vs numeric gradient check in float64.
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float64)
    z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      # TODO(b/241834841): Test with `compute_full_loss` set as True
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          nn_impl.log_poisson_loss, [z_np, x_tf])
      self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
class LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase):
  """Tests for nn_ops.log_softmax_v2 against a NumPy reference."""

  def _log_softmax(self, x):
    # Numerically-stable NumPy reference: shift by the row max first.
    assert len(x.shape) == 2
    m = x.max(1)[:, np.newaxis]
    u = x - m
    return u - np.log(np.sum(np.exp(u), 1, keepdims=True))

  def testLogSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._log_softmax(x_np)
    x_tf = constant_op.constant(x_np)
    y_tf = nn_ops.log_softmax_v2(x_tf)
    y_tf_np = self.evaluate(y_tf)
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)

  def testLogSoftmaxAxes(self):
    arr = np.linspace(0., 1, 12).reshape(3, 4)
    # For a rank-2 array, axis=-2 and axis=0 address the same dimension,
    # so all three results must agree.
    x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2)
    y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0)
    z_gt_axis = nn_ops.log_softmax_v2(arr, axis=0)
    x_neg_axis_tf = self.evaluate(x_neg_axis)
    y_pos_axis_tf = self.evaluate(y_pos_axis)
    z_gt_axis_tf = self.evaluate(z_gt_axis)
    eps = 1e-3
    self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)
    self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)

  @parameterized.parameters(((5, 10),), ((2, 3, 4),))
  def testGradient(self, x_shape):
    # Analytic vs numeric gradient check in float64.
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np)
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          nn_ops.log_softmax_v2, [x_tf])
      self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
class L2LossTest(test_lib.TestCase):
  """Tests for nn_ops.l2_loss (sum(x**2) / 2)."""

  def testL2Loss(self):
    # BUG FIX: the original expression
    #     [dtypes.float32, dtypes.float64] + \
    #         [dtypes.bfloat16] if test_util.is_gpu_available(...) else []
    # parses as ([f32, f64] + [bf16]) if gpu else [] because the
    # conditional expression binds more loosely than `+`. On machines
    # without a CUDA GPU the dtype list was therefore empty and the test
    # body never ran at all. Build the list explicitly instead: float32
    # and float64 are always tested; bfloat16 only when a CUDA GPU is
    # available.
    dtypes_to_test = [dtypes.float32, dtypes.float64]
    if test_util.is_gpu_available(cuda_only=True):
      dtypes_to_test.append(dtypes.bfloat16)
    for dtype in dtypes_to_test:
      x = constant_op.constant([1.0, 0.0, 3.0, 2.0],
                               shape=[2, 2],
                               name="x",
                               dtype=dtype)
      # l2_loss = sum(x**2) / 2 = (1 + 0 + 9 + 4) / 2 = 7.
      l2loss = nn_ops.l2_loss(x)
      value = self.evaluate(l2loss)
      self.assertAllClose(7.0, value)

  def testGradient(self):
    # Analytic vs numeric gradient check in float64.
    x_shape = [20, 7, 3]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    with self.cached_session():
      x = constant_op.constant(x_val, name="x")
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          nn_ops.l2_loss, [x])
      self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
class L2NormalizeTest(test_lib.TestCase):
  """Tests for nn_impl.l2_normalize against a NumPy reference."""

  def _l2Normalize(self, x, dim):
    # NumPy reference; `dim` may be a single axis or a list of axes.
    if isinstance(dim, list):
      norm = np.linalg.norm(x, axis=tuple(dim))
      # Re-insert the reduced axes so the division broadcasts correctly.
      for d in dim:
        norm = np.expand_dims(norm, d)
      return x / norm
    else:
      norm = np.apply_along_axis(np.linalg.norm, dim, x)
      return x / np.expand_dims(norm, dim)

  def testL2Normalize(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    # Exercise every single-axis choice.
    for dim in range(len(x_shape)):
      y_np = self._l2Normalize(x_np, dim)
      x_tf = constant_op.constant(x_np, name="x")
      y_tf = nn_impl.l2_normalize(x_tf, dim)
      self.assertAllClose(y_np, self.evaluate(y_tf))

  def testL2NormalizeDimArray(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    # Normalize over two axes at once.
    dim = [1, 2]
    y_np = self._l2Normalize(x_np, dim)
    x_tf = constant_op.constant(x_np, name="x")
    y_tf = nn_impl.l2_normalize(x_tf, dim)
    self.assertAllClose(y_np, self.evaluate(y_tf))

  def testL2NormalizeGradient(self):
    # Analytic vs numeric gradient check in float64.
    x_shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float64)
    with self.cached_session():
      x_tf = constant_op.constant(x_np, name="x")
      # TODO(b/241834841): Test l2_normalize with `axis` set to other dims
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          nn_impl.l2_normalize, [x_tf])
      self.assertAllClose(theoretical, numerical)

  def testL2NormalizeComplex(self):
    x_shape = [20, 7, 3]
    for dtype in [np.complex64, np.complex128]:
      np.random.seed(1)
      # Random complex input: independent real and imaginary parts.
      x_np = (
          np.random.random_sample(x_shape).astype(dtype) +
          np.random.random_sample(x_shape).astype(dtype) * 1j)
      for dim in range(len(x_shape)):
        y_np = self._l2Normalize(x_np, dim)
        x_tf = constant_op.constant(x_np, name="x")
        y_tf = nn_impl.l2_normalize(x_tf, dim)
        self.assertAllClose(y_np, self.evaluate(y_tf))
# (name, callable) pairs covering the dropout entry points under test; the
# stateless variants are bound to a fixed seed so their output is
# reproducible across runs.
DROPOUT_FNS = [
    ("stateful_v1", nn_ops.dropout),
    ("stateful_v2", nn_ops.dropout_v2),
    ("stateless", functools.partial(nn_ops.stateless_dropout, seed=(1, 2))),
    ("stateless_philox", functools.partial(
        nn_ops.stateless_dropout, seed=(1, 2), rng_alg="philox"))]
class DropoutTest(test_lib.TestCase, parameterized.TestCase):
  """Statistical and shape tests for the dropout variants in DROPOUT_FNS."""
  @parameterized.named_parameters(
      ("_%s_%s_%s" % (case_name, use_noise_shape, keep_prob), dropout_fn,  # pylint: disable=g-complex-comprehension
       use_noise_shape, keep_prob)
      for keep_prob in [0.1, 0.5, 0.8]
      for use_noise_shape in ["no", "concrete", "partial"]
      for case_name, dropout_fn in DROPOUT_FNS)
  def testDropout(self, dropout_fn, use_noise_shape, keep_prob):
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    if use_noise_shape == "no":
      x_dim = 70
      y_dim = 30
    else:
      # With a row-broadcast noise shape ([x_dim, 1]) each keep/drop draw
      # covers a whole row, so use many rows to keep the statistics meaningful.
      x_dim = 70 * 30
      y_dim = 3
    num_iter = 10
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    if use_noise_shape == "no":
      noise_shape = None
    elif use_noise_shape == "concrete":
      noise_shape = [x_dim, 1]
    else:
      # "partial": leading dimension left unknown (None) on purpose.
      noise_shape = [None, 1]
    dropout = dropout_fn(t, rate=(1 - keep_prob), noise_shape=noise_shape)
    final_count = 0
    self.assertEqual([x_dim, y_dim], dropout.get_shape())
    for _ in range(0, num_iter):
      value = self.evaluate(dropout)
      final_count += np.count_nonzero(value)
      # Verifies that there are only two values: 0 and 1/keep_prob.
      sorted_value = np.unique(np.sort(value))
      self.assertEqual(0, sorted_value[0])
      self.assertAllClose(1 / keep_prob, sorted_value[1])
    # Check that we are in the 15% error range
    expected_count = x_dim * y_dim * keep_prob * num_iter
    rel_error = math.fabs(final_count - expected_count) / expected_count
    self.assertLess(rel_error, 0.15)
  @parameterized.named_parameters(
      ("_%s_%s" % (case_name, keep_prob), dropout_fn, keep_prob)  # pylint: disable=g-complex-comprehension
      for keep_prob in [0.1, 0.5, 0.8]
      for case_name, dropout_fn in DROPOUT_FNS)
  def testShapedDropoutCorrelation(self, dropout_fn, keep_prob):
    # Runs a shaped dropout and tests that the correlations are correct.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    # noise_shape=[x_dim, 1]: the keep/drop decision is broadcast across each
    # row, which the per-row uniqueness check below verifies.
    dropout = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
    self.assertEqual([x_dim, y_dim], dropout.get_shape())
    for _ in range(0, num_iter):
      value = self.evaluate(dropout)
      # Verifies that each row has only one type of activation.
      for i in range(x_dim):
        sorted_value = np.unique(np.sort(value[i, :]))
        self.assertEqual(sorted_value.size, 1)
  @parameterized.named_parameters(
      ("_%s_%s_%s" % (case_name, keep_prob, use_keep_prob), case_name,  # pylint: disable=g-complex-comprehension
       dropout_fn, keep_prob, use_keep_prob)
      for use_keep_prob in [False, True]
      for keep_prob in [0.1, 0.5, 0.8]
      for case_name, dropout_fn in DROPOUT_FNS)
  @test_util.run_deprecated_v1
  def testDropoutPlaceholderRateAndKeepProb(self, case_name, dropout_fn,
                                            keep_prob, use_keep_prob):
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    if use_keep_prob and case_name != "stateful_v1":
      self.skipTest("Only V1 `dropout` has the `keep_prob` argument.")
    x_dim = 70
    y_dim = 30
    num_iter = 10
    with self.cached_session():
      t = constant_op.constant(
          1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
      keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
      if use_keep_prob:
        dropout = dropout_fn(t, keep_prob=keep_prob_placeholder)
      else:
        dropout = dropout_fn(t, rate=1 - keep_prob_placeholder)
      final_count = 0
      self.assertEqual([x_dim, y_dim], dropout.get_shape())
      for _ in range(0, num_iter):
        value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
        final_count += np.count_nonzero(value)
        # Verifies that there are only two values: 0 and 1/keep_prob.
        sorted_value = np.unique(np.sort(value))
        self.assertEqual(0, sorted_value[0])
        self.assertAllClose(1 / keep_prob, sorted_value[1])
    # Check that we are in the 15% error range
    expected_count = x_dim * y_dim * keep_prob * num_iter
    rel_error = math.fabs(final_count - expected_count) / expected_count
    self.assertLess(rel_error, 0.15)
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  @test_util.run_deprecated_v1
  def testShapedDropoutUnknownShape(self, dropout_fn):
    """A fully-unknown noise_shape must not clobber the input's static shape."""
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    dropout_x = dropout_fn(
        x,
        rate=(1 - keep_prob),
        noise_shape=array_ops.placeholder(dtypes.int32))
    self.assertEqual(x.get_shape(), dropout_x.get_shape())
  @parameterized.named_parameters(
      ("_%s_%s" % (case_name, use_keep_prob), case_name, dropout_fn,  # pylint: disable=g-complex-comprehension
       use_keep_prob)
      for use_keep_prob in [False, True]
      for case_name, dropout_fn in DROPOUT_FNS)
  @test_util.run_deprecated_v1
  def testInvalidRateAndKeepProb(self, case_name, dropout_fn, use_keep_prob):
    """Out-of-range or non-scalar rate/keep_prob values raise ValueError."""
    if use_keep_prob and case_name != "stateful_v1":
      self.skipTest("Only V1 `dropout` has the `keep_prob` argument.")
    if use_keep_prob:
      fn = lambda x, y: dropout_fn(x, keep_prob=y)
    else:
      fn = lambda x, y: dropout_fn(x, rate=y)
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      fn(t, -1.0)
    with self.assertRaises(ValueError):
      fn(t, 1.1)
    with self.assertRaises(ValueError):
      fn(t, [0.0, 1.0])
    with self.assertRaises(ValueError):
      fn(t, array_ops.placeholder(dtypes.float64))
    with self.assertRaises(ValueError):
      fn(t, array_ops.placeholder(dtypes.float32, shape=[2]))
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  def testLargeRate(self, dropout_fn):
    """A rate close to (but below) 1 is accepted without raising."""
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    _ = dropout_fn(t, rate=0.9)
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  def testVariableRef(self, dropout_fn):
    """Dropout accepts a variable input, not only plain tensors."""
    x = variable_scope.get_variable("x", shape=[10, 10], dtype=dtypes.float32)
    _ = dropout_fn(x, rate=0.1)
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  @test_util.run_deprecated_v1
  def testShapedDropoutShapeError(self, dropout_fn):
    # Runs shaped dropout and verifies an error is thrown on misshapen noise.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      _ = dropout_fn(
          t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim + 10])
    with self.assertRaises(ValueError):
      _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[x_dim, y_dim, 5])
    with self.assertRaises(ValueError):
      _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[x_dim + 3])
    with self.assertRaises(ValueError):
      _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[x_dim])
    # test that broadcasting proceeds
    _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[y_dim])
    _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[1, y_dim])
    _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[x_dim, 1])
    _ = dropout_fn(t, rate=(1 - keep_prob), noise_shape=[1, 1])
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  def testNoDropout(self, dropout_fn):
    """rate=0 must return the input values unchanged."""
    x = array_ops.zeros((5,))
    y = dropout_fn(x, rate=0)
    self.assertAllEqual(x, y)
  @parameterized.named_parameters(
      ("_%s" % case_name, dropout_fn)
      for case_name, dropout_fn in DROPOUT_FNS)
  def testDropoutWithIntegerInputs(self, dropout_fn):
    """Integer inputs are rejected with ValueError."""
    x = constant_op.constant([1, 1, 1, 1, 1])
    with self.assertRaises(ValueError):
      _ = dropout_fn(x, rate=0.5)
@test_util.run_all_without_tensor_float_32(
    "Tests _compute_sampled_logits and related functions, which call matmul")
class ComputeSampledLogitsTest(test_lib.TestCase):
  """Numerical tests for _compute_sampled_logits and the losses built on it."""
  def setUp(self):
    # Tolerance used by the assertAllClose checks below.
    self._eps = 1e-3
  def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,
                        sampled, subtract_log_q):
    """Randomly generates input/output data for a single test case.
    This function returns numpy constants for use in a test case.
    Args:
      num_classes: An int. The number of embedding classes in the test case.
      dim: An int. The dimension of the embedding.
      batch_size: An int. The batch size.
      num_true: An int. The number of target classes per training example.
      labels: A list of batch_size * num_true ints. The target classes.
      sampled: A list of indices in [0, num_classes).
      subtract_log_q: A bool corresponding to the parameter in
          _compute_sampled_logits().
    Returns:
      weights: Embedding weights to use as test input. It is a numpy array
          of shape [num_classes, dim]
      biases: Embedding biases to use as test input. It is a numpy array
          of shape [num_classes].
      hidden_acts: Forward activations of the network to use as test input.
          It is a numpy array of shape [batch_size, dim].
      sampled_vals: A tuple based on `sampled` to use as test input in the
          format returned by a *_candidate_sampler function.
      exp_logits: The output logits expected from _compute_sampled_logits().
          It is a numpy array of shape [batch_size, num_true + len(sampled)].
      exp_labels: The output labels expected from _compute_sampled_logits().
          It is a numpy array of shape [batch_size, num_true + len(sampled)].
    """
    weights = np.random.randn(num_classes, dim).astype(np.float32)
    biases = np.random.randn(num_classes).astype(np.float32)
    hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)
    # All expected counts are fixed at 0.5, matching the sampler-output format.
    true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)
    sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)
    sampled_vals = (sampled, true_exp, sampled_exp)
    sampled_w, sampled_b = weights[sampled], biases[sampled]
    true_w, true_b = weights[labels], biases[labels]
    # Per-example dot products against each of its num_true target rows.
    true_logits = np.sum(
        hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(
            (batch_size, num_true, dim)),
        axis=2)
    true_b = true_b.reshape((batch_size, num_true))
    true_logits += true_b
    sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
    if subtract_log_q:
      true_logits -= np.log(true_exp)
      sampled_logits -= np.log(sampled_exp[np.newaxis, :])
    exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)
    # True classes share probability mass 1/num_true; sampled classes get 0.
    exp_labels = np.hstack(
        (np.ones_like(true_logits) / num_true, np.zeros_like(sampled_logits)))
    return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels
  def _ShardTestEmbeddings(self, weights, biases, num_shards):
    """Shards the weights and biases returned by _GenerateTestData.
    Args:
      weights: The weights returned by _GenerateTestData.
      biases: The biases returned by _GenerateTestData.
      num_shards: The number of shards to create.
    Returns:
      sharded_weights: A list of size `num_shards` containing all the weights.
      sharded_biases: A list of size `num_shards` containing all the biases.
    """
    with ops.Graph().as_default() as g:
      sharded_weights = variable_scope.get_variable(
          "w",
          partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
          initializer=constant_op.constant(weights))
      sharded_biases = variable_scope.get_variable(
          "b",
          partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
          initializer=constant_op.constant(biases))
      with self.session(graph=g) as sess:
        self.evaluate(variables.global_variables_initializer())
        return self.evaluate([list(sharded_weights), list(sharded_biases)])
  def testShapes(self):
    """Outputs have shape [batch_size, num_true + num_sampled]."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_basic_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      # Fixed: assertEqual's third positional parameter is `msg`, so the
      # previous `self._eps` argument was silently used as the assertion
      # message; shape equality needs no tolerance at all.
      self.assertEqual(exp_logits.shape, got_logits.shape)
      self.assertEqual(exp_labels.shape, got_labels.shape)
  def testBasic(self):
    """Without accidental hit removal or subtract_log_q."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_basic_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)
  def testAccidentalHitRemoval(self):
    """With accidental hit removal, no subtract_log_q."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    sampled = [1, 0, 2, 3]
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, _,
       _) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=sampled,
           subtract_log_q=False)
      logits_tensor, _ = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=len(sampled),
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=True,
          partition_strategy="div",
          name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true)
      # Test that the exponentiated logits of accidental hits are near 0.
      # First we need to find the hits in this random test run:
      labels_reshape = labels.reshape((batch_size, num_true))
      got_logits = self.evaluate(logits_tensor)
      for row in range(batch_size):
        row_labels = labels_reshape[row, :]
        for col in range(len(sampled)):
          if sampled[col] in row_labels:
            # We need to add the num_true_test offset into logits_*
            self.assertNear(
                np.exp(got_logits[row, col + num_true]), 0., self._eps)
  def testSubtractLogQ(self):
    """With subtract_log_q, no accidental hit removal."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=True)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=True,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_subtract_log_q_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)
  def testSharded(self):
    """With sharded weights and sharded biases."""
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      weight_shards, bias_shards = self._ShardTestEmbeddings(
          weights, biases, num_shards=3)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=[constant_op.constant(shard) for shard in weight_shards],
          biases=[constant_op.constant(shard) for shard in bias_shards],
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_sharded_num_true_%d" % num_true)
      got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)
  def testNCELoss(self):
    # A simple test to verify the numerics.
    def _SigmoidCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      pred = 1. / (1. + np.exp(-logits))
      eps = 0.0001
      pred = np.minimum(np.maximum(pred, eps), 1 - eps)
      return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    (weights, biases, hidden_acts, sampled_vals, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=[1, 0, 2, 3],
         subtract_log_q=True)
    exp_nce_loss = np.sum(
        _SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1)
    got_nce_loss = nn_impl.nce_loss_v2(
        weights=constant_op.constant(weights),
        biases=constant_op.constant(biases),
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals)
    self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
    # Test with sharded weights and sharded biases.
    weight_shards, bias_shards = self._ShardTestEmbeddings(
        weights, biases, num_shards=3)
    got_nce_loss = nn_impl.nce_loss_v2(
        weights=[constant_op.constant(shard) for shard in weight_shards],
        biases=[constant_op.constant(shard) for shard in bias_shards],
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals)
    self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)
  def testSampledSoftmaxLoss(self):
    # A simple test to verify the numerics.
    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      stable_exp_logits = np.exp(logits -
                                 np.amax(logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    (weights, biases, hidden_acts, sampled_vals, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=[1, 0, 2, 3],
         subtract_log_q=True)
    exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
        exp_logits, exp_labels)
    got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
        weights=constant_op.constant(weights),
        biases=constant_op.constant(biases),
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals,
        remove_accidental_hits=False)
    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-4)
    # Test with sharded weights and sharded biases.
    weight_shards, bias_shards = self._ShardTestEmbeddings(
        weights, biases, num_shards=3)
    got_sampled_softmax_loss = nn_impl.sampled_softmax_loss_v2(
        weights=[constant_op.constant(shard) for shard in weight_shards],
        biases=[constant_op.constant(shard) for shard in bias_shards],
        labels=constant_op.constant(labels, shape=(batch_size, 1)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=1,
        sampled_values=sampled_vals,
        remove_accidental_hits=False)
    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-4)
  def testSampledSoftmaxLossBf16(self):
    # A simple test to verify the numerics for bfloat16.
    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      stable_exp_logits = np.exp(logits -
                                 np.amax(logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)
    np.random.seed(0)
    num_classes = 5
    batch_size = 3
    labels = [0, 1, 2]
    sampled = [1, 0, 2, 3]
    (weights, biases, hidden_acts, _, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=1,
         labels=labels,
         sampled=sampled,
         subtract_log_q=True)
    exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(
        exp_logits, exp_labels)
    # Rebuild the sampler output in bfloat16 to exercise the low-precision
    # code path end to end.
    true_exp_bf16 = np.full([batch_size, 1],
                            fill_value=0.5,
                            dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_exp_bf16 = np.full([len(sampled)],
                               fill_value=0.5,
                               dtype=dtypes.bfloat16.as_numpy_dtype)
    sampled_vals_bf16 = (sampled, true_exp_bf16, sampled_exp_bf16)
    got_sampled_softmax_loss = math_ops.cast(
        nn_impl.sampled_softmax_loss_v2(
            weights=constant_op.constant(weights, dtype=dtypes.bfloat16),
            biases=constant_op.constant(biases, dtype=dtypes.bfloat16),
            labels=constant_op.constant(
                labels, shape=(batch_size, 1), dtype=dtypes.bfloat16),
            inputs=constant_op.constant(hidden_acts, dtype=dtypes.bfloat16),
            num_sampled=4,
            num_classes=num_classes,
            num_true=1,
            sampled_values=sampled_vals_bf16,
            remove_accidental_hits=False), dtypes.float32)
    # Loose (1e-1) tolerance: bfloat16 has only ~8 bits of mantissa.
    self.assertAllClose(exp_sampled_softmax_loss,
                        self.evaluate(got_sampled_softmax_loss), 1e-1)
class CReluTest(test_lib.TestCase):
  """crelu output equals [relu(x), relu(-x)] concatenated on the last axis."""
  def test(self):
    np.random.seed(1)  # Deterministic input.
    inputs = np.random.randn(3, 4).astype(np.float32)
    expected = np.concatenate(
        [inputs * (inputs > 0), -inputs * (inputs < 0)], axis=1)
    actual = self.evaluate(nn_ops.crelu(constant_op.constant(inputs)))
    self.assertAllClose(expected, actual, 1e-4)
class ReluTest(test_lib.TestCase):
  """Tests for nn_ops.relu, including NaN propagation."""
  def test(self):
    np.random.seed(1)  # Deterministic input.
    inputs = np.random.randn(3, 4).astype(np.float32)
    expected = np.maximum(inputs, 0.0)
    actual = self.evaluate(nn_ops.relu(constant_op.constant(inputs)))
    self.assertAllEqual(expected, actual)
  @test_util.disable_xla(
      "This test relies on undefined behavior that XLA does not replicate")
  @test_util.run_deprecated_v1
  def testNaNs(self):
    # Test that relu(nan) = nan for various sizes.
    for size in range(18):
      nan_input = np.full(size, np.nan)
      # TODO(b/178335491): This is broken on GPU today.
      with self.cached_session(use_gpu=False):
        result = nn_ops.relu(constant_op.constant(nan_input)).eval()
        self.assertTrue(np.isnan(result).all())
class LeakyReluTest(test_lib.TestCase):
  """Tests for nn_ops.leaky_relu values, shapes, and op naming."""
  def testRange(self):
    batch_size = 3
    height, width = 4, 4
    np.random.seed(1)  # Make it reproducible.
    np_inputs = np.random.uniform(
        size=(batch_size, height, width, 3)).astype(np.float32)
    tensor_inputs = constant_op.constant(np_inputs)
    tensor_outputs = nn_ops.leaky_relu(tensor_inputs)
    self.assertEqual(tensor_inputs.shape, tensor_outputs.shape)
    inputs_val, outputs_val = self.evaluate([tensor_inputs, tensor_outputs])
    # Inputs come from [0, 1), so leaky_relu should act as the identity here.
    self.assertGreaterEqual(outputs_val.min(), 0.0)
    self.assertLessEqual(outputs_val.max(), 1.0)
    self.assertAllClose(inputs_val, outputs_val)
  @test_util.run_deprecated_v1
  def testValues(self):
    for dtype in (np.int32, np.int64, np.float16, np.float32, np.float64):
      values = np.array([-2, -1, 0, 1, 2], dtype=dtype)
      result = self.evaluate(nn_ops.leaky_relu(constant_op.constant(values)))
      tolerance = 2e-3 if dtype == np.float16 else 1e-6
      self.assertAllClose(
          result, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tolerance, atol=tolerance)
  @test_util.run_deprecated_v1
  def testName(self):
    values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)
    named = nn_ops.leaky_relu(
        constant_op.constant(values), name="test_relu_op")
    self.assertEqual(named.name, "test_relu_op:0")
    unnamed = nn_ops.leaky_relu(constant_op.constant(values))
    self.assertEqual(unnamed.name, "LeakyRelu:0")
class GeluTest(test_lib.TestCase):
  """Compares nn_ops.gelu against a numpy/scipy reference implementation."""
  def test(self):
    def gelu_reference(x, approximate=False):
      if approximate:
        # Tanh approximation of the GELU.
        return 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))
      from scipy.stats import norm  # pylint: disable=g-import-not-at-top
      return x * norm.cdf(x)
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.randn(3, 4).astype(np.float32)
    # Exact form first, then the tanh approximation.
    self.assertAllClose(
        gelu_reference(inputs), self.evaluate(nn_ops.gelu(inputs)))
    self.assertAllClose(
        gelu_reference(inputs, True), self.evaluate(nn_ops.gelu(inputs, True)))
@test_util.run_all_in_graph_and_eager_modes
class SwishTest(test_lib.TestCase):
  """Checks nn_impl.swish values and gradients, with and without beta."""
  def _ramp_values(self):
    # Two rows of 100 points covering [-7, 0] and [0, 7].
    return constant_op.constant(
        np.array([np.linspace(-7.0, 0.0, 100),
                  np.linspace(0.0, 7.0, 100)], dtype=np.float32))
  def testValues(self):
    x = self._ramp_values()
    actual, expected = self.evaluate(
        [nn_impl.swish(x), x * math_ops.sigmoid(x)])
    self.assertAllClose(actual, expected)
  def testValuesWithBeta(self):
    x = self._ramp_values()
    actual, expected = self.evaluate(
        [nn_impl.swish(x, beta=0.5), x * math_ops.sigmoid(0.5 * x)])
    self.assertAllClose(actual, expected)
  def testGradients(self):
    x = constant_op.constant(np.random.randn(5, 3, 4) * 5)
    with self.cached_session():
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          nn_impl.swish, [x])
      self.assertAllClose(theoretical, numerical)
  def testGradientsWithBeta(self):
    x = constant_op.constant(np.random.randn(5, 3, 4) * 5)
    with self.cached_session():
      swish_half_beta = lambda t: nn_impl.swish(t, beta=0.5)
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          swish_half_beta, [x])
      self.assertAllClose(theoretical, numerical)
class MomentsTest(test_lib.TestCase):
  """Compares nn_impl.moments_v2 against numpy mean/var references."""
  def doOutputTest(self,
                   input_shape,
                   moments_axes,
                   tol=1e-4,
                   check_gradients=False):
    """Checks mean/variance (and optionally gradients) against numpy.
    Args:
      input_shape: Shape of the random input to generate.
      moments_axes: Axes to reduce over, as passed to moments_v2.
      tol: rtol/atol for the value comparisons.
      check_gradients: If True, also run a finite-difference gradient check.
    """
    # Sweep a few offsets/scales and both keepdims settings.
    for mu in [0.0, 1.0, 1e3]:
      for sigma in [1.0, 0.1]:
        for keep_dims in [True, False]:
          input_values = np.random.rand(*input_shape) * sigma + mu
          expected_mean = np.mean(
              input_values, axis=moments_axes, keepdims=keep_dims)
          expected_var = np.var(
              input_values, axis=moments_axes, keepdims=keep_dims)
          with ops.Graph().as_default() as g:
            with self.session(graph=g) as sess:
              inputs = constant_op.constant(
                  input_values, shape=input_shape, dtype=dtypes.float32)
              mean, variance = nn_impl.moments_v2(
                  inputs, moments_axes, keepdims=keep_dims)
              if check_gradients:
                err = gradient_checker.compute_gradient_error(
                    inputs, input_shape, mean, mean.shape.as_list())
                self.assertLess(err, 1e-3)
                err = gradient_checker.compute_gradient_error(
                    inputs, input_shape, variance, variance.shape.as_list())
                self.assertLess(err, 1e-3)
              # Evaluate.
              [mean, variance] = self.evaluate([mean, variance])
              # Make sure that there are no NaNs
              self.assertFalse(np.isnan(mean).any())
              self.assertFalse(np.isnan(variance).any())
              self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
              self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)
  def testOutputAndGradient2DInput0(self):
    self.doOutputTest((10, 10), (0,), check_gradients=True)
  def testOutputAndGradient2DInput01(self):
    self.doOutputTest((10, 10), (0, 1), check_gradients=True)
  def testOutput2DInput0(self):
    self.doOutputTest((10, 300), (0,))
  def testOutput2DInput1(self):
    self.doOutputTest((10, 300), (1,))
  def testOutput2DInput01(self):
    self.doOutputTest((10, 300), (0, 1))
  def testOutput4DInput0(self):
    self.doOutputTest((10, 10, 10, 30), (0,))
  def testOutput4DInput1(self):
    self.doOutputTest((10, 10, 10, 30), (1,))
  def testOutput4DInput3(self):
    self.doOutputTest((10, 10, 10, 30), (3,))
  def testOutput4DInput012(self):
    self.doOutputTest((10, 10, 10, 30), (0, 1, 2))
  def testOutput4DInput123(self):
    self.doOutputTest((10, 10, 10, 30), (1, 2, 3))
class DataFormatDimMapTest(test_lib.TestCase):
def _test(self, x_val, y_val_expected):
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x)
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def test(self):
self._test(0, 0)
self._test(1, 2)
self._test(2, 3)
self._test(3, 1)
self._test(-1, 1)
self._test(-2, 3)
self._test(-3, 2)
self._test(-4, 0)
self._test([1, 3], [2, 1])
self._test([1, 3, -2], [2, 1, 3])
self._test([1, -3, -2], [2, 2, 3])
self._test([[1, -3], [1, -1]], [[2, 2], [2, 1]])
def testNHWCtoNCHW(self):
x_val = [1, -3, -2]
y_val_expected = [2, 2, 3]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="NCHW")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def testNHWCtoHWNC(self):
x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
y_val_expected = [2, 0, 1, 3, 2, 0, 1, 3]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="HWNC")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def testNHWCtoWHCN(self):
x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
y_val_expected = [3, 1, 0, 2, 3, 1, 0, 2]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NHWC", dst_format="WHCN")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def testNDHWCtoNCDHW(self):
x_val = [1, -4, -3, -2]
y_val_expected = [2, 2, 3, 4]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="NCDHW")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def testNDHWCtoDHWNC(self):
x_val = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
y_val_expected = [3, 0, 1, 2, 4, 3, 0, 1, 2, 4]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="DHWNC")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
def testDNHWCtoWHDCN(self):
x_val = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
y_val_expected = [4, 2, 1, 0, 3, 4, 2, 1, 0, 3]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="WHDCN")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
@test_util.disable_xla("XLA catches the error and rethrows as different one")
def testArbitraryASCII(self):
x_val = [-4, -3, -2, -1, 0, 1, 2, 3]
y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0]
x = constant_op.constant(x_val)
y = nn_ops.data_format_dim_map(x, src_format="qwer", dst_format="rewq")
with test_util.use_gpu():
y_val = self.evaluate(y)
self.assertAllEqual(y_val, y_val_expected)
@test_util.disable_xla("XLA catches the error and rethrows as different one")
def testInvalidLength(self):
x = [-4, -3, -2, -1, 0, 1, 2, 3]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Source format must be of length 4 or 5"):
op = nn_ops.data_format_dim_map(
x, src_format="12345678", dst_format="87654321")
with test_util.use_gpu():
self.evaluate(op)
@test_util.disable_xla("XLA catches the error and rethrows as different one")
def testDuplicateSrc(self):
x = [-4, -3, -2, -1, 0, 1, 2, 3]
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Destination and source format must determine a permutation"):
op = nn_ops.data_format_dim_map(x, src_format="1233", dst_format="4321")
with test_util.use_gpu():
self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testDuplicateDst(self):
    """A destination format with a repeated character is rejected."""
    x = [-4, -3, -2, -1, 0, 1, 2, 3]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Destination and source format must determine a permutation"):
      op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="3321")
      with test_util.use_gpu():
        self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testExtraSpecifiers(self):
    """A destination character absent from the source format is rejected."""
    x = [-4, -3, -2, -1, 0, 1, 2, 3]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Destination and source format must determine a permutation"):
      op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="5321")
      with test_util.use_gpu():
        self.evaluate(op)
class DataFormatVectorPermuteTest(test_lib.TestCase):
  """Tests for `nn_ops.data_format_vec_permute`.

  Covers 1-D vectors of length 2/3/4/5 and 2-D [N, 2] inputs for a range of
  source/destination data-format pairs, plus the invalid-format error cases.
  """
  def testNHWCToNCHW(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    # No formats given: uses the op's default src/dst formats.
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [7, 3, 4, 9])
  def testNHWCToNCHW_Size2(self):
    # Length-2 vectors carry only the spatial dims and come back unchanged.
    x_val = [4, 9]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [4, 9])
  def testNDHWCtoNCDHW(self):
    x_val = [7, 4, 9, 3, 5]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="NCDHW")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [7, 5, 4, 9, 3])
  def testNDHWCtoNCDHW_Size3(self):
    x_val = [4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="NCDHW")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [4, 9, 3])
  @test_util.disable_xla("unsupported data format")
  def testNHWCToWHCN(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="WHCN")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 4, 3, 7])
  @test_util.disable_xla("unsupported data format")
  def testNHWCToWHCN_Size2(self):
    x_val = [4, 9]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="WHCN")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 4])
  @test_util.disable_xla("unsupported data format")
  def testNDHWCToWHDCN(self):
    x_val = [7, 4, 9, 3, 5]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="WHDCN")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [3, 9, 4, 5, 7])
  @test_util.disable_xla("unsupported data format")
  def testNDHWCToWHDCN_Size3(self):
    x_val = [4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="WHDCN")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [3, 9, 4])
  def testNCHWToNHWC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [7, 9, 3, 4])
  def testNCHWToNHWC_Size2(self):
    # Uses the default src/dst formats (no explicit NCHW->NHWC args); for a
    # length-2 spatial vector the result is the identity either way.
    x_val = [9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 3])
  def testNCDHWToNDHWC(self):
    x_val = [7, 4, 9, 3, 5]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NCDHW", dst_format="NDHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [7, 9, 3, 5, 4])
  def testNCDHWToNDHWC_Size3(self):
    x_val = [9, 3, 5]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NCDHW", dst_format="NDHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 3, 5])
  def testNHWCToHWNC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [4, 9, 7, 3])
  def testHWNCToNHWC(self):
    x_val = [7, 4, 9, 3]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [9, 7, 4, 3])
  def testNHWCToNCHW2D(self):
    # 2-D inputs permute whole rows (one [lo, hi]-style pair per dimension).
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x)
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[7, 4], [5, 1], [9, 3], [4, 5]])
  def testNHWCToHWNC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NHWC", dst_format="HWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[9, 3], [4, 5], [7, 4], [5, 1]])
  def testHWNCToNHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="HWNC", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[4, 5], [7, 4], [9, 3], [5, 1]])
  def testNCHWToNHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(x, src_format="NCHW", dst_format="NHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]])
  def testNDHWCToNCDHW2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1], [8, 2]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="NCDHW")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[7, 4], [8, 2], [9, 3], [4, 5], [5, 1]])
  @test_util.disable_xla("unsupported data format")
  def testNDHWCToDHWNC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1], [8, 2]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NDHWC", dst_format="DHWNC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[9, 3], [4, 5], [5, 1], [7, 4], [8, 2]])
  @test_util.disable_xla("unsupported data format")
  def testDHWNCToNDHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1], [8, 2]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="DHWNC", dst_format="NDHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[5, 1], [7, 4], [9, 3], [4, 5], [8, 2]])
  def testNCDHWToNDHWC2D(self):
    x_val = [[7, 4], [9, 3], [4, 5], [5, 1], [8, 2]]
    x = constant_op.constant(x_val)
    y = nn_ops.data_format_vec_permute(
        x, src_format="NCDHW", dst_format="NDHWC")
    with test_util.use_gpu():
      y_val = self.evaluate(y)
    self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [8, 2], [9, 3]])
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testInvalidLength(self):
    # Formats that are not 4 or 5 characters long are rejected.
    x = [0, 1, 2, 3]
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "Source format must be of length 4 or 5"):
      op = nn_ops.data_format_vec_permute(
          x, src_format="12345678", dst_format="87654321")
      with test_util.use_gpu():
        self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testDuplicateSrc(self):
    # A repeated character in the source format cannot define a permutation.
    x = [0, 1, 2, 3]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Destination and source format must determine a permutation"):
      op = nn_ops.data_format_vec_permute(
          x, src_format="1233", dst_format="4321")
      with test_util.use_gpu():
        self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testDuplicateDst(self):
    # A repeated character in the destination format is likewise rejected.
    x = [0, 1, 2, 3]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Destination and source format must determine a permutation"):
      op = nn_ops.data_format_vec_permute(
          x, src_format="1234", dst_format="3321")
      with test_util.use_gpu():
        self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def testExtraSpecifiers(self):
    # A destination character missing from the source format is rejected.
    x = [0, 1, 2, 3]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Destination and source format must determine a permutation"):
      op = nn_ops.data_format_vec_permute(
          x, src_format="1234", dst_format="5321")
      with test_util.use_gpu():
        self.evaluate(op)
  @test_util.disable_xla("XLA catches the error and rethrows as different one")
  def test2DNoWH(self):
    # 2-D inputs require both H and W in the format strings.
    x = [[0, 1], [2, 3]]
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Format specifier must contain H and W for 2D case"):
      op = nn_ops.data_format_vec_permute(
          x, src_format="1234", dst_format="4321")
      with test_util.use_gpu():
        self.evaluate(op)
@test_util.run_all_in_graph_and_eager_modes
class AvgPoolTest(test_lib.TestCase):
  """Checks `avg_pool_v2` dispatches to the rank-specific avg-pool ops.

  Each test compares `avg_pool_v2` against `avg_pool1d`/`avg_pool`/
  `avg_pool3d` on the same input, for both Tensor and NumPy inputs.
  """
  def test1DTensor(self):
    x = array_ops.ones([3, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test1DNumpy(self):
    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
    # np.ones defaults to using float64 when dtype is not explicitly specified
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.ones([3, 6, 5], dtype=dtype)
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test1DNumpyWithGolden(self):
    # Pins avg_pool1d against hand-computed values (not just self-consistency).
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.array([[[3], [6], [5]], [[1], [0], [1]]], dtype=dtype)
    ksize = 2
    strides = 1
    y = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
    expected_y = np.array([[[4.5], [5.5], [5.0]], [[0.5], [0.5], [1.0]]],
                          dtype=dtype)
    self.assertAllEqual(self.evaluate(y), expected_y)
  def test2DTensor(self):
    x = array_ops.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test2DNumpy(self):
    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
    # np.ones defaults to using float64 when dtype is not explicitly specified
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.ones([3, 6, 6, 5], dtype=dtype)
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3DTensor(self):
    x = array_ops.ones([3, 7, 6, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3DNumpy(self):
    x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
    ksize = 2
    strides = 2
    y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
@test_util.run_all_in_graph_and_eager_modes
class MaxPoolTest(test_lib.TestCase):
  """Checks `max_pool_v2` dispatches to the rank-specific max-pool ops.

  Mirrors AvgPoolTest, plus error cases for unsupported input ranks.
  """
  def test1DTensor(self):
    x = array_ops.ones([3, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test1DNumpy(self):
    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
    # np.ones defaults to using float64 when dtype is not explicitly specified
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.ones([3, 6, 5], dtype=dtype)
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test1DNumpyWithGolden(self):
    # Pins max_pool1d against hand-computed values (not just self-consistency).
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.array([[[3], [6], [5]], [[1], [0], [1]]], dtype=dtype)
    ksize = 2
    strides = 1
    y = nn_ops.max_pool1d(x, ksize, strides, "SAME")
    expected_y = np.array([[[6], [6], [5]], [[1], [1], [1]]], dtype=dtype)
    self.assertAllEqual(self.evaluate(y), expected_y)
  def test2DTensor(self):
    x = array_ops.ones([3, 6, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test2DNumpy(self):
    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
    # np.ones defaults to using float64 when dtype is not explicitly specified
    dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
    x = np.ones([3, 6, 6, 5], dtype=dtype)
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3DTensor(self):
    x = array_ops.ones([3, 7, 6, 6, 5])
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3DNumpy(self):
    x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
    ksize = 2
    strides = 2
    y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
    y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def testIncorrectSizeInputSmall(self):
    # Rank-2 input is below the supported 3/4/5-D range.
    x = array_ops.ones([3, 4])
    with self.assertRaisesRegex(
        ValueError,
        "`input.shape.rank` must be 3, 4 or 5.*of rank 2."):
      nn_ops.max_pool_v2(x, 2, 2, "SAME")
  def testIncorrectSizeInput(self):
    # Rank-6 input is above the supported 3/4/5-D range.
    x = array_ops.ones([3, 4, 1, 2, 1, 2])
    with self.assertRaisesRegex(
        ValueError,
        "`input.shape.rank` must be 3, 4 or 5.*of rank 6."):
      nn_ops.max_pool_v2(x, 2, 2, "SAME")
@test_util.run_all_in_graph_and_eager_modes
class ConvolutionTest(test_lib.TestCase):
  """Tests for `nn_ops.convolution`."""

  def testUnknownSize(self):
    """Tracing a conv with a fully-unknown input shape must not fail."""
    spec = tensor_spec.TensorSpec(None, dtypes.float32, name="x")
    kernel = np.ones([3, 6, 6, 5], dtype=np.float32)

    @def_function.function
    def conv_fn(value):
      return nn_ops.convolution(value, kernel, "SAME")

    conv_fn.get_concrete_function(spec)
class ConvTransposeTest(test_lib.TestCase):
  """Checks `conv_transpose` dispatches to the rank-specific transposes.

  Each test compares `conv_transpose` against `conv1d_transpose`/
  `conv2d_transpose_v2`/`conv3d_transpose_v2`, with the output shape given
  both as a Python list and as a constant Tensor; error cases cover
  unsupported output-shape lengths.
  """
  def test1D(self):
    t = array_ops.ones([2, 4, 3])
    v = array_ops.ones([2, 5, 3])
    strides = 2
    y1 = nn_ops.conv1d_transpose(t, v, [2, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 5], strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test1DTensor(self):
    t = array_ops.ones([2, 4, 3])
    v = array_ops.ones([2, 5, 3])
    strides = 2
    y1 = nn_ops.conv1d_transpose(t, v, [2, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 5]), strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test2D(self):
    t = array_ops.ones([2, 4, 4, 3])
    v = array_ops.ones([2, 2, 5, 3])
    strides = 2
    y1 = nn_ops.conv2d_transpose_v2(t, v, [2, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 5], strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test2DTensor(self):
    t = array_ops.ones([2, 4, 4, 3])
    v = array_ops.ones([2, 2, 5, 3])
    strides = 2
    y1 = nn_ops.conv2d_transpose_v2(t, v, [2, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 8, 5]),
                               strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3D(self):
    t = array_ops.ones([2, 4, 4, 4, 3])
    v = array_ops.ones([2, 2, 2, 5, 3])
    strides = 2
    y1 = nn_ops.conv3d_transpose_v2(t, v, [2, 8, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, [2, 8, 8, 8, 5], strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def test3DTensor(self):
    t = array_ops.ones([2, 4, 4, 4, 3])
    v = array_ops.ones([2, 2, 2, 5, 3])
    strides = 2
    y1 = nn_ops.conv3d_transpose_v2(t, v, [2, 8, 8, 8, 5], strides)
    y2 = nn_ops.conv_transpose(t, v, constant_op.constant([2, 8, 8, 8, 5]),
                               strides)
    self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
  def testIncorrectSizeInputSmall(self):
    with self.assertRaisesRegex(
        ValueError,
        "`output_shape` must be of length 3, 4 or 5.*of length 2."):
      nn_ops.conv_transpose(None, 2, [2, 3], "SAME")
  def testIncorrectSizeInput(self):
    with self.assertRaisesRegex(
        ValueError,
        "`output_shape` must be of length 3, 4 or 5.* of length 6."):
      nn_ops.conv_transpose(None, 2, [2, 3, 4, 2, 5, 1], "SAME")
  def testTensorsNoShape(self):
    with self.assertRaisesRegex(
        ValueError, "`output_shape` must be a tensor or sized collection"):
      nn_ops.conv_transpose(None, None, None, None)
class RaggedEmbeddingTest(test_lib.TestCase):
  """Tests for embedding lookup on ragged id tensors."""
  def testRaggedTensor(self):
    weights = constant_op.constant([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]])
    ragged_ids = ragged_factory_ops.constant([[1, 2, 3], [0], [1, 2]],
                                             ragged_rank=1)
    embedded_ragged = nn.embedding_lookup_ragged(weights, ragged_ids)
    expected_output = ragged_factory_ops.constant(
        [[[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[0, 0, 0]], [[1, 1, 1], [2, 2, 2]]
        ],
        ragged_rank=1)
    self.assertAllEqual(expected_output, embedded_ragged)
  def testMultipleRaggedDimTensor(self):
    # Lookup also works when the ids have more than one ragged dimension.
    weights = constant_op.constant([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4],
                                    [5, 5], [6, 6]])
    ragged_ids = ragged_factory_ops.constant(
        [[[[3, 4], [0, 6]], []], [[[2, 1], [1, 0]], [[2, 5], [2, 3]]], [[[1, 0]]
        ]],
        ragged_rank=2)
    embedded_ragged = nn.embedding_lookup_ragged(weights, ragged_ids)
    expected_output = ragged_factory_ops.constant(
        [[[[[3, 3], [4, 4]], [[0, 0], [6, 6]]], []],
         [[[[2, 2], [1, 1]], [[1, 1], [0, 0]]],
          [[[2, 2], [5, 5]], [[2, 2], [3, 3]]]], [[[[1, 1], [0, 0]]]]],
        ragged_rank=2)
    self.assertAllEqual(expected_output, embedded_ragged)
  def testMissingWeights(self):
    ragged_ids = ragged_factory_ops.constant([[1, 2, 3], [0], [1, 2]])
    with self.assertRaisesRegex(ValueError,
                                "The embedding weights must be specified.*"):
      nn.embedding_lookup_ragged(None, ragged_ids)
  def testEmptyWeights(self):
    ragged_ids = ragged_factory_ops.constant([[1, 2, 3], [0], [1, 2]])
    with self.assertRaisesRegex(ValueError,
                                "The embedding weights should not be empty.*"):
      nn.embedding_lookup_ragged([], ragged_ids)
  def testInvalidIndicesType(self):
    # Float ids are rejected; lookup requires integer indices.
    weights = constant_op.constant([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    ragged_ids = ragged_factory_ops.constant([[1., 2., 3.], [1., 2.]])
    with self.assertRaisesRegex(
        ValueError, "The values contained by the inputs have type*"):
      nn.embedding_lookup_ragged(weights, ragged_ids)
  def testMaxNormForEmbeddings(self):
    # Rows with L2 norm above max_norm are clipped back to that norm.
    weights = constant_op.constant(
        [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
        dtype=dtypes.float32)
    ragged_ids = ragged_factory_ops.constant([[1, 2, 3], [0], [1, 2]],
                                             ragged_rank=1)
    actual_embeddings = [
        nn.embedding_lookup(weights, ragged_ids, max_norm=max_norm)
        for max_norm in [1, 2, 5]
    ]
    expected_embeddings = (
        # max_norm = 1
        [[[.5, .5, .5, .5], [.5, .5, .5, .5], [.5, .5, .5, .5]], [[0, 0, 0, 0]],
         [[.5, .5, .5, .5], [.5, .5, .5, .5]]],
        # max_norm = 2
        [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0]],
         [[1, 1, 1, 1], [1, 1, 1, 1]]],
        # max_norm = 5
        [[[1, 1, 1, 1], [2, 2, 2, 2], [2.5, 2.5, 2.5, 2.5]], [[0, 0, 0, 0]],
         [[1, 1, 1, 1], [2, 2, 2, 2]]],
    )
    for expected, actual in zip(expected_embeddings, actual_embeddings):
      self.assertAllClose(
          ragged_factory_ops.constant(expected, dtype=float, ragged_rank=1),
          actual)
class IsotonicTest(parameterized.TestCase, test_lib.TestCase):
  """Tests for `nn_ops.isotonic_regression`."""
  @test_util.run_in_graph_and_eager_modes
  def test_increasing_and_decreasing(self):
    # On monotone input, the matching direction is an identity fit (each
    # element its own segment); the opposite direction pools every row.
    x = constant_op.constant([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
                             dtype=dtypes.float64)
    y, segments = nn_ops.isotonic_regression(x, decreasing=False)
    self.assertAllClose(y, x)
    self.assertAllClose(segments, [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]])
    y, segments = nn_ops.isotonic_regression(x, decreasing=True)
    self.assertAllClose(
        y,
        [
            [2, 2, 2, 2, 2],  # Average of the inputs.
            [7, 7, 7, 7, 7]
        ])
    self.assertAllClose(segments, array_ops.zeros((2, 5)))
    # pylint: disable=invalid-unary-operand-type
    y, segments = nn_ops.isotonic_regression(-x, decreasing=True)
    self.assertAllClose(segments, [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]])
    self.assertAllClose(y, -x)
    y, segments = nn_ops.isotonic_regression(-x, decreasing=False)
    self.assertAllClose(
        -y,
        [
            [2, 2, 2, 2, 2],  # Average of the inputs.
            [7, 7, 7, 7, 7]
        ])
    self.assertAllClose(segments, array_ops.zeros((2, 5)))
  @test_util.run_in_graph_and_eager_modes
  def test_different_axis(self):
    # axis=0 runs the regression down the columns instead of along rows.
    x = constant_op.constant([[0, 6, 2, 8, 4], [5, 1, 7, 3, 9]],
                             dtype=dtypes.float64)
    y, segments = nn_ops.isotonic_regression(x, decreasing=True, axis=0)
    self.assertAllClose(
        y,
        [
            [2.5, 6, 4.5, 8, 6.5],  # Either identity or average.
            [2.5, 1, 4.5, 3, 6.5]
        ])
    self.assertAllClose(segments, [[0, 0, 0, 0, 0], [0, 1, 0, 1, 0]])
  @test_util.run_v2_only
  def testGradientV2(self, dtype=np.float64, batch_size=30, dimensions=50):
    @def_function.function
    def ComputeIsotonicFn(x):
      y, _ = nn_ops.isotonic_regression(x)  # No gradient wrt segments.
      return y
    np.random.seed(0)
    x_init = np.random.randn(batch_size, dimensions).astype(dtype)
    grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
        ComputeIsotonicFn, [x_init], delta=1e-5)
    self.assertAllClose(grad_theoretical, grad_numerical)
  @test_util.run_v1_only("compute_gradient_error is v1 only")
  def testGradientV1(self, dtype=np.float64, batch_size=30, dimensions=50):
    np.random.seed(0)
    x_init = np.random.randn(batch_size, dimensions).astype(dtype)
    with self.cached_session():
      x = array_ops.placeholder(dtype, (batch_size, dimensions))
      y, _ = nn_ops.isotonic_regression(x)  # Segments have no gradient.
      max_error = gradient_checker.compute_gradient_error(
          x, (batch_size, dimensions), y, (batch_size, dimensions), x_init)
    self.assertAllClose(max_error, 0.)
  @parameterized.parameters([[dtypes.half, dtypes.half],
                             [dtypes.bfloat16, dtypes.bfloat16],
                             [dtypes.float32, dtypes.float32],
                             [dtypes.float64, dtypes.float64],
                             [dtypes.int32, dtypes.float64],
                             [dtypes.int16, dtypes.float32]])
  def testTypePromotion(self, dtype_in, expected_dtype_out):
    # Integer inputs are promoted to a float output; segments stay int32.
    x = constant_op.constant([[0, 6, 2, 8, 4], [5, 1, 7, 3, 9]], dtype=dtype_in)
    y, segments = nn_ops.isotonic_regression(x)
    self.assertEqual(y.dtype, expected_dtype_out)
    self.assertEqual(segments.dtype, dtypes.int32)
# Run the full test suite when this file is executed as a script.
if __name__ == "__main__":
  test_lib.main()
| {
"content_hash": "d80d24dfcc521997f4995b33188fba8a",
"timestamp": "",
"source": "github",
"line_count": 1994,
"max_line_length": 116,
"avg_line_length": 37.32447342026078,
"alnum_prop": 0.6157608330534095,
"repo_name": "paolodedios/tensorflow",
"id": "f8f161d4ff074e4d60c0b7926f9be5a473790127",
"size": "75114",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/nn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import re
class Directives:
    """Parses beautifier directive comments such as ``/* beautify x:y */``.

    The start/end block patterns are regex fragments for the surrounding
    comment delimiters (e.g. ``/\\*`` and ``\\*/``).
    """

    def __init__(self, start_block_pattern, end_block_pattern):
        """Pre-compile the directive-block, directive-pair and ignore-end regexes."""
        block_source = (start_block_pattern + r' beautify( \w+[:]\w+)+ ' +
                        end_block_pattern)
        self.__directives_block_pattern = re.compile(block_source)
        self.__directive_pattern = re.compile(r' (\w+)[:](\w+)')
        self.__directives_end_ignore_pattern = re.compile(
            start_block_pattern + r'\sbeautify\signore:end\s' +
            end_block_pattern)

    def get_directives(self, text):
        """Return a ``{name: value}`` dict of directives in *text*, or None.

        None is returned when *text* is not a directive block at all.
        """
        if self.__directives_block_pattern.match(text) is None:
            return None
        found = {}
        match = self.__directive_pattern.search(text)
        while match is not None:
            found[match.group(1)] = match.group(2)
            match = self.__directive_pattern.search(text, match.end())
        return found

    def readIgnored(self, input):
        """Consume *input* through the 'beautify ignore:end' marker."""
        return input.readUntilAfter(self.__directives_end_ignore_pattern)
| {
"content_hash": "f629836485d890cd4f40fecfff9377a1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 128,
"avg_line_length": 33.689655172413794,
"alnum_prop": 0.6345957011258956,
"repo_name": "Masood-M/yalih",
"id": "cad31f64ad1db3a9669d7d7f86a5e057b38b5a87",
"size": "2139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsbeautifier/build/lib.linux-x86_64-2.7/jsbeautifier/core/directives.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6697"
},
{
"name": "C",
"bytes": "1847103"
},
{
"name": "C++",
"bytes": "31699"
},
{
"name": "HTML",
"bytes": "1462"
},
{
"name": "JavaScript",
"bytes": "5213"
},
{
"name": "Lex",
"bytes": "39109"
},
{
"name": "M4",
"bytes": "10332"
},
{
"name": "Makefile",
"bytes": "5638"
},
{
"name": "Objective-C",
"bytes": "3144"
},
{
"name": "Python",
"bytes": "3464909"
},
{
"name": "Roff",
"bytes": "5201"
},
{
"name": "Shell",
"bytes": "19330"
},
{
"name": "YARA",
"bytes": "5624"
},
{
"name": "Yacc",
"bytes": "74397"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  # op.inputs = (input_sizes, filter, out_backprop); the sizes vector has no
  # gradient.
  filter_grad = nn_ops.conv2d_backprop_filter(
      grad, array_ops.shape(op.inputs[1]), op.inputs[2], strides, padding,
      use_cudnn_on_gpu, data_format)
  out_backprop_grad = nn_ops.conv2d(grad, op.inputs[1], strides, padding,
                                    use_cudnn_on_gpu, data_format)
  return [None, filter_grad, out_backprop_grad]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
  """Gradients for Conv2DBackpropFilter.

  Args:
    op: the Conv2DBackpropFilter op.
    grad: gradient w.r.t. the filter gradient this op produced.

  Returns:
    Gradients w.r.t. the op's three inputs; the filter-sizes vector gets
    none.
  """
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), grad, op.inputs[2], strides, padding,
      use_cudnn_on_gpu, data_format)
  out_backprop_grad = nn_ops.conv2d(op.inputs[0], grad, strides, padding,
                                    use_cudnn_on_gpu, data_format)
  return [input_grad, None, out_backprop_grad]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
  """Gradients for Conv3D w.r.t. its input and its filter."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.conv3d_backprop_input_v2(
      array_ops.shape(op.inputs[0]),
      op.inputs[1],
      grad,
      strides=strides,
      padding=padding,
      data_format=data_format)
  filter_grad = nn_ops.conv3d_backprop_filter_v2(
      op.inputs[0],
      array_ops.shape(op.inputs[1]),
      grad,
      strides=strides,
      padding=padding,
      data_format=data_format)
  return [input_grad, filter_grad]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
  """Gradients for Conv3DBackpropInputV2.

  op.inputs = (input_sizes, filter, out_backprop); the sizes vector has no
  gradient.
  """
  data_format = op.get_attr("data_format")
  return [None,
          nn_ops.conv3d_backprop_filter_v2(grad,
                                           array_ops.shape(op.inputs[1]),
                                           op.inputs[2],
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding"),
                                           data_format=data_format),
          nn_ops.conv3d(grad,
                        op.inputs[1],
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"),
                        data_format=data_format)]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
  """Gradients for Conv3DBackpropFilterV2.

  op.inputs = (input, filter_sizes, out_backprop); the sizes vector has no
  gradient.
  """
  data_format = op.get_attr("data_format")
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          grad,
                                          op.inputs[2],
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding"),
                                          data_format=data_format),
          None,
          nn_ops.conv3d(op.inputs[0],
                        grad,
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"),
                        data_format=data_format)]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
  """Gradient for AvgPool3D, via the generated AvgPool3DGrad kernel."""
  return gen_nn_ops._avg_pool3d_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
  """Gradient for AvgPool3DGrad.

  The shape input gets a stopped gradient; the backprop input's gradient is
  a forward AvgPool3D of the incoming gradient.
  """
  return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops.avg_pool3d(
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
  """Gradient for MaxPool3D, via the generated MaxPool3DGrad kernel."""
  return gen_nn_ops._max_pool3d_grad(
      op.inputs[0],
      op.outputs[0],
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
  """Gradient for MaxPool3DGrad.

  The orig_input/orig_output inputs receive zero gradients; the backprop
  input's gradient comes from the generated MaxPool3DGradGrad kernel.
  """
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool3d_grad_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
  """Gradient for MaxPool3DGradGrad.

  Mirrors _MaxPool3DGradGrad: zero gradients for orig_input/orig_output and
  a MaxPool3DGrad of the incoming gradient for the third input.
  """
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool3d_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  With probs of shape [batch_size, dim], the Jacobian is
  diag(softmax) - softmax * softmax', i.e. diagonal minus rank one, so the
  backprop reduces to:
    grad_x = softmax * (grad_softmax - sum(grad_softmax * softmax))

  Args:
    op: the Softmax op.
    grad_softmax: the tensor representing the gradient w.r.t. the
      softmax output.

  Returns:
    gradient w.r.t the input to the softmax
  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  # Per-row contraction of the incoming gradient with the softmax output,
  # broadcast back over the class dimension.
  summed = math_ops.reduce_sum(grad_softmax * softmax, [1])
  return (grad_softmax - array_ops.reshape(summed, [-1, 1])) * softmax
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
  """The gradient for log_softmax.
  log_softmax = input - log(sum(exp(input))
  dlog_softmax/dinput = diag - softmax(input)
  Args:
    op: The log softmax op.
    grad: The tensor representing the gradient w.r.t. the output.
  Returns:
    The gradients w.r.t. the input.
  """
  # Recover softmax by exponentiating the op's log-softmax output.
  softmax = math_ops.exp(op.outputs[0])
  # NOTE(review): `keep_dims` is the legacy spelling of `keepdims`.
  return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.
  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.
  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.
  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.
  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # get_attr raises ValueError when the op carries no data_format attr.
    data_format = None
  return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
                                                  data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Broadcasts the incoming bias-shaped gradient back up to the shape of the
  original BiasAdd input: reshape it to a broadcast-compatible shape, then
  tile it across every non-bias dimension.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # get_attr raises ValueError for a missing attr; fall back to the default.
    data_format = None
  shape = array_ops.shape(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)
  # NOTE: removed an unused `rank = array_ops.rank(op.inputs[0])` that only
  # added a dead node to the graph.
  if data_format == b"NCHW":
    # Bias dimension sits third from the end; surround it with ones so the
    # reshape broadcasts, then tile over all other dimensions.
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    # Default layout: bias corresponds to the trailing dimension.
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)
  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  # Reduce over every dimension except the last (the bias dimension).
  rank = array_ops.rank(received_grad)
  reduction_dims = math_ops.range(rank - 1)
  bias_grad = math_ops.reduce_sum(received_grad, reduction_dims)
  return (received_grad, bias_grad)
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  """Gradient for Relu, delegated to the fused kernel (keyed on the output)."""
  relu_output = op.outputs[0]
  return gen_nn_ops._relu_grad(grad, relu_output)
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
  """Second-order gradient for Elu.

  op is the first-order EluGrad node; op.inputs[0] is the incoming first-order
  gradient and op.inputs[1] appears to be the Elu activation value (named
  elu_x here) -- TODO(review): confirm against the EluGrad kernel signature.
  """
  elu_x = op.inputs[1]
  # First element: grad routed back through the EluGrad kernel itself.
  # Second element: contribution where the activation is negative; zero
  # elsewhere (Elu is linear for positive inputs, so its second derivative
  # vanishes there).
  return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
          array_ops.where(elu_x < 0,
                          grad * op.inputs[0],
                          array_ops.zeros(shape=array_ops.shape(elu_x),
                                          dtype=elu_x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  """Gradient for Relu6, delegated to the fused kernel (keyed on the input)."""
  features = op.inputs[0]
  return gen_nn_ops._relu6_grad(grad, features)
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  """Gradient for Elu, computed by the fused kernel from the op's output."""
  elu_output = op.outputs[0]
  return gen_nn_ops._elu_grad(grad, elu_output)
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  """Gradient for Softplus, delegated to the fused kernel (keyed on the input)."""
  features = op.inputs[0]
  return gen_nn_ops._softplus_grad(grad, features)
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
  """Second-order gradient for Softplus."""
  # Let:
  #   y = tf.nn.softplus(x)
  #   dx = gen_nn_ops._softplus_grad(dy, x) = dy / (1 + exp(-x))
  # This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
  dy, x = op.inputs
  # The control dependency forces grad's op to run before the new nodes;
  # presumably to pin graph ordering -- TODO(review): confirm intent.
  with ops.control_dependencies([grad.op]):
    ddy = gen_nn_ops._softplus_grad(grad, x)  # pylint: disable=protected-access
    # Note exp(-x) + 2 + exp(x) == (1 + exp(-x)) * (1 + exp(x)), the
    # denominator of d/dx [1 / (1 + exp(-x))] expanded.
    d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
    return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  """Gradient for Softsign, delegated to the fused kernel (keyed on the input)."""
  features = op.inputs[0]
  return gen_nn_ops._softsign_grad(grad, features)
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu.

  The second input of the ReluGrad node receives no gradient (Relu is
  piecewise linear), so its slot gets zeros of the matching shape/dtype.
  """
  x = op.inputs[1]
  zeros_like_x = array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)
  return (gen_nn_ops._relu_grad(grad, x), zeros_like_x)
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Appending a trailing unit dimension turns vec into a [D0, 1] column,
  # which then broadcasts across mat's second dimension.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
  """Gradient function for SoftmaxCrossEntropyWithLogits."""
  # grad_loss is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # grad_grad is the backprop for softmax gradient.
  # There is no gradient for the labels
  #
  # Second derivative is just softmax derivative w.r.t. logits.
  softmax_grad = op.outputs[1]
  grad = _BroadcastMul(grad_loss, softmax_grad)
  def IsZero(g):
    # Some introspection to check if the gradient is feeding zeros: either the
    # producing op is a known zero-maker, or the tensor folds to an all-zero
    # constant at graph-construction time.
    if g.op.type in ("ZerosLike", "Zeros"):
      return True
    const_fill_value = tensor_util.constant_value(g)
    return const_fill_value is not None and (const_fill_value == 0).all()
  # Only build the (comparatively expensive) second-order term when grad_grad
  # can actually be nonzero; the common first-order-only case skips it.
  if not IsZero(grad_grad):
    logits = op.inputs[0]
    softmax = nn_ops.softmax(logits)
    # Batched inner product of grad_grad with softmax via [b,1,n] x [b,n,1]
    # matmul, squeezed back to [b, n] before the elementwise multiply.
    grad += ((grad_grad - array_ops.squeeze(
        math_ops.matmul(grad_grad[:, None, :],
                        softmax[:, :, None]), axis=1)) * softmax)
  # No gradient flows to the labels input.
  return grad, None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
      op.outputs[1], message="Currently there is no way to take the second "
      "derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
      "implementation's interaction with tf.gradients()")
  # Scale the per-example softmax gradient by the incoming loss gradient;
  # the labels input receives None.
  return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D w.r.t. its input tensor and its filter."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad, strides, padding,
      use_cudnn_on_gpu, data_format)
  filter_grad = nn_ops.conv2d_backprop_filter(
      op.inputs[0], array_ops.shape(op.inputs[1]), grad, strides, padding,
      use_cudnn_on_gpu, data_format)
  return [input_grad, filter_grad]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
  """Gradients for DepthwiseConv2dNative w.r.t. its input and its filter."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.depthwise_conv2d_native_backprop_input(
      array_ops.shape(op.inputs[0]),
      op.inputs[1],
      grad,
      strides,
      padding,
      data_format=data_format)
  filter_grad = nn_ops.depthwise_conv2d_native_backprop_filter(
      op.inputs[0],
      array_ops.shape(op.inputs[1]),
      grad,
      strides,
      padding,
      data_format=data_format)
  return [input_grad, filter_grad]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
  """Gradients for Dilation2D w.r.t. its input and its filter."""
  strides = op.get_attr("strides")
  rates = op.get_attr("rates")
  padding = op.get_attr("padding")
  input_grad = nn_ops.dilation2d_backprop_input(
      op.inputs[0], op.inputs[1], grad, strides, rates, padding)
  filter_grad = nn_ops.dilation2d_backprop_filter(
      op.inputs[0], op.inputs[1], grad, strides, rates, padding)
  return [input_grad, filter_grad]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient for local response normalization, via the fused LRNGrad kernel."""
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
                               op.get_attr("depth_radius"),
                               op.get_attr("bias"),
                               op.get_attr("alpha"),
                               op.get_attr("beta"))]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient for AvgPool, computed by the fused kernel from the input shape."""
  orig_input_shape = array_ops.shape(op.inputs[0])
  return gen_nn_ops._avg_pool_grad(
      orig_input_shape, grad, op.get_attr("ksize"), op.get_attr("strides"),
      op.get_attr("padding"), data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
  """Second-order gradient for AvgPool.

  The shape input gets a stop_gradient passthrough; the grad-grad is simply
  the forward average pooling of the incoming gradient (AvgPool is linear).
  """
  pooled = gen_nn_ops._avg_pool(
      grad, op.get_attr("ksize"), op.get_attr("strides"),
      op.get_attr("padding"), data_format=op.get_attr("data_format"))
  return (array_ops.stop_gradient(op.inputs[0]), pooled)
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient for MaxPool, via the fused kernel (needs input and output)."""
  ksize = op.get_attr("ksize")
  strides = op.get_attr("strides")
  return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad, ksize,
                                   strides,
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
  """Gradient for MaxPoolWithArgmax, routed through the saved argmax output."""
  ksize = op.get_attr("ksize")
  strides = op.get_attr("strides")
  return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0], grad,
                                               op.outputs[1], ksize, strides,
                                               padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
  """Second-order gradient for MaxPool.

  The MaxPoolGrad node's first two inputs receive zero gradients; the third
  result routes the incoming grad-grad through the fused MaxPoolGradGrad
  kernel.
  """
  first_input = op.inputs[0]
  second_input = op.inputs[1]
  zeros_first = array_ops.zeros(shape=array_ops.shape(first_input),
                                dtype=first_input.dtype)
  zeros_second = array_ops.zeros(shape=array_ops.shape(second_input),
                                 dtype=second_input.dtype)
  grad_grad = gen_nn_ops._max_pool_grad_grad(
      first_input,
      second_input,
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
  return (zeros_first, zeros_second, grad_grad)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
  """Third-order gradient for MaxPool.

  The MaxPoolGradGrad node's first two inputs receive zero gradients; the
  third result routes the incoming gradient back through MaxPoolGrad.
  """
  first_input = op.inputs[0]
  second_input = op.inputs[1]
  zeros_first = array_ops.zeros(shape=array_ops.shape(first_input),
                                dtype=first_input.dtype)
  zeros_second = array_ops.zeros(shape=array_ops.shape(second_input),
                                 dtype=second_input.dtype)
  pooled_grad = gen_nn_ops._max_pool_grad(
      first_input,
      second_input,
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
  return (zeros_first, zeros_second, pooled_grad)
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  # pylint: disable=protected-access
  # The fused kernel needs the forward input/output plus the row/col pooling
  # sequences (outputs[1] and outputs[2]) to reconstruct the pooling regions.
  return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
                                              grad_0, op.outputs[1],
                                              op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalAvgPool.

  Since FractionalAvgPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalAvgPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalAvgPool op.
  """
  # pylint: disable=protected-access
  # Unlike the max-pool variant, the avg kernel takes the *static shape* of
  # the original input rather than the tensor itself.
  return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
                                              op.outputs[1], op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  # Inputs 0..2 are x, mean and variance; input 4 appears to be gamma (input
  # 3, beta, is not needed by the grad kernel) -- TODO(review): confirm
  # against the kernel's signature.
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
  """Return the gradients for the 3 inputs of BatchNorm.

  Args:
    op: The BatchNormOp for which we need to compute gradients.
    *grad: An argument list for tensors of gradients wrt the outputs
          with grad[0] as grad_y.

  Returns:
    grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
            [grad_y - mean(grad_y) - (x - mean(x)) *
            mean(grad_y * (x - mean(x))) / (variance + epsilon)]
    grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
                rsqrt(variance + epsilon))
    grad_offset: gradient for offset, which is sum(grad_y)
  """
  # op.outputs[3] and op.outputs[4] are the extra statistics the forward
  # kernel saved for the backward pass (presumably batch mean/variance or
  # reserved space -- confirm against the FusedBatchNorm kernel docs).
  return gen_nn_ops.fused_batch_norm_grad(
      grad[0],
      op.inputs[0],
      op.inputs[1],
      op.outputs[3],
      op.outputs[4],
      epsilon=op.get_attr("epsilon"),
      data_format=op.get_attr("data_format"),
      is_training=op.get_attr("is_training"))
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  x = op.inputs[0]
  return x * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Scatters the incoming gradient back to the positions the top-k values were
  taken from; every other input position receives zero.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])
  # k: size of the last dimension of the returned indices.
  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D): each row's indices are offset
  # by that row's start position (row * in_lastdim) in the flattened input.
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)]
| {
"content_hash": "33bdda421188d54d4c58a8b84bb546a9",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 80,
"avg_line_length": 36.25774647887324,
"alnum_prop": 0.5976770384182107,
"repo_name": "manazhao/tf_recsys",
"id": "f1453f9ef0de544bbaf2b18866736595230c0c15",
"size": "26432",
"binary": false,
"copies": "9",
"ref": "refs/heads/r1.0",
"path": "tensorflow/python/ops/nn_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "193348"
},
{
"name": "C++",
"bytes": "27280674"
},
{
"name": "CMake",
"bytes": "177556"
},
{
"name": "Go",
"bytes": "929281"
},
{
"name": "Java",
"bytes": "333525"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "254489"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "23927341"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "337394"
},
{
"name": "Vim script",
"bytes": "2803"
}
],
"symlink_target": ""
} |
import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
# Module-level logger and the message template shared by both middlewares
# in this file.
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):

    """Resolve the docs project a request belongs to.

    Sets ``request.slug`` and swaps ``request.urlconf`` when the request
    arrives on a ``<slug>.<PRODUCTION_DOMAIN>`` subdomain, a CNAME'd custom
    domain, or carries an ``X-RTD-Slug`` header.
    """

    def process_request(self, request):
        """Attach slug/urlconf to the request, or raise Http404 for bad hosts.

        Always returns None in the success paths so processing continues
        through the middleware stack.
        """
        host = request.get_host().lower()
        path = request.get_full_path()
        log_kwargs = dict(host=host, path=path)
        if settings.DEBUG:
            log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
            return None
        # Strip any port number before inspecting the hostname.
        if ':' in host:
            host = host.split(':')[0]
        domain_parts = host.split('.')
        # Serve subdomains - but don't depend on the production domain only having 2 parts
        if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
            subdomain = domain_parts[0]
            is_www = subdomain.lower() == 'www'
            is_ssl = subdomain.lower() == 'ssl'
            if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
                request.subdomain = True
                request.slug = subdomain
                request.urlconf = 'readthedocs.core.subdomain_urls'
                return None
        # Serve CNAMEs
        if settings.PRODUCTION_DOMAIN not in host and \
           'localhost' not in host and \
           'testserver' not in host:
            request.cname = True
            domains = Domain.objects.filter(domain=host)
            if domains.count():
                for domain in domains:
                    if domain.domain == host:
                        request.slug = domain.project.slug
                        # BUGFIX: was the stale, un-namespaced
                        # 'core.subdomain_urls'; every other branch in this
                        # method uses the 'readthedocs.'-prefixed module.
                        request.urlconf = 'readthedocs.core.subdomain_urls'
                        request.domain_object = True
                        domain.count = domain.count + 1
                        domain.save()
                        log.debug(LOG_TEMPLATE.format(
                            msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
                        break
            if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
                request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
                request.urlconf = 'readthedocs.core.subdomain_urls'
                request.rtdheader = True
                log.debug(LOG_TEMPLATE.format(
                    msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
            # Try header first, then DNS
            elif not hasattr(request, 'domain_object'):
                try:
                    slug = cache.get(host)
                    if not slug:
                        from dns import resolver
                        answer = [ans for ans in resolver.query(host, 'CNAME')][0]
                        domain = answer.target.to_unicode().lower()
                        slug = domain.split('.')[0]
                        cache.set(host, slug, 60 * 60)
                        # Cache the slug -> host mapping permanently.
                        log.debug(LOG_TEMPLATE.format(
                            msg='CNAME cached: %s->%s' % (slug, host),
                            **log_kwargs))
                    request.slug = slug
                    request.urlconf = 'readthedocs.core.subdomain_urls'
                    log.debug(LOG_TEMPLATE.format(
                        msg='CNAME detetected: %s' % request.slug,
                        **log_kwargs))
                    # Record the custom domain if its project exists.
                    try:
                        proj = Project.objects.get(slug=slug)
                        domain, created = Domain.objects.get_or_create(
                            project=proj,
                            domain=host,
                        )
                        if created:
                            domain.machine = True
                            domain.cname = True
                        domain.count = domain.count + 1
                        domain.save()
                    except (ObjectDoesNotExist, MultipleObjectsReturned):
                        log.debug(LOG_TEMPLATE.format(
                            msg='Project CNAME does not exist: %s' % slug,
                            **log_kwargs))
                except Exception:
                    # BUGFIX: was a bare ``except:``; narrowed so SystemExit /
                    # KeyboardInterrupt are no longer swallowed.
                    # Some crazy person is CNAMEing to us. 404.
                    log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
                    raise Http404(_('Invalid hostname'))
        # Google was finding crazy www.blah.readthedocs.org domains.
        # Block these explicitly after trying CNAME logic.
        if len(domain_parts) > 3:
            # Stop www.fooo.readthedocs.org
            if domain_parts[0] == 'www':
                log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
                raise Http404(_('Invalid hostname'))
            log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
            # raise Http404(_('Invalid hostname'))
        # Normal request.
        return None
class SingleVersionMiddleware(object):

    """Reset urlconf for requests for 'single_version' docs.

    In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
    after SubdomainMiddleware.
    """

    def _get_slug(self, request):
        """Get slug from URLs requesting docs.

        If URL is like '/docs/<project_name>/', we split path
        and pull out slug.

        If URL is subdomain or CNAME, we simply read request.slug, which is
        set by SubdomainMiddleware.
        """
        # Subdomain / CNAME requests already carry the slug.
        if hasattr(request, 'slug'):
            return request.slug.lower()
        # Otherwise look for '/docs/<project>/' style URLs.
        parts = request.get_full_path().split('/')
        if len(parts) > 2 and parts[1] == 'docs':
            return parts[2].lower()
        return None

    def process_request(self, request):
        slug = self._get_slug(request)
        if slug:
            try:
                proj = Project.objects.get(slug=slug)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # Let 404 be handled further up stack.
                return None
            if getattr(proj, 'single_version', False):
                request.urlconf = 'readthedocs.core.single_version_urls'
                # Logging
                log_kwargs = dict(host=request.get_host(),
                                  path=request.get_full_path())
                log.debug(LOG_TEMPLATE.format(
                    msg='Handling single_version request', **log_kwargs)
                )
        return None
| {
"content_hash": "b3acc0cc7bd8519860464a468597c361",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 99,
"avg_line_length": 42.355828220858896,
"alnum_prop": 0.5238991888760139,
"repo_name": "SteveViss/readthedocs.org",
"id": "ac0a94772124dee7fd8c5cfd43fdac508bdb11a6",
"size": "6904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "readthedocs/core/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "52861"
},
{
"name": "HTML",
"bytes": "196232"
},
{
"name": "JavaScript",
"bytes": "432018"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "870201"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
# Tell Django which AppConfig to use for this app when only the package
# path is listed in INSTALLED_APPS.
default_app_config = 'brasilcomvc.accounts.apps.AccountsAppConfig'
| {
"content_hash": "52ff653671d9fb0c35c7d97fb61e4164",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 66,
"avg_line_length": 67,
"alnum_prop": 0.835820895522388,
"repo_name": "brasilcomvc/brasilcomvc",
"id": "3b097450f8fb7fc5dcc47802232921fc616cc852",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brasilcomvc/accounts/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20338"
},
{
"name": "CoffeeScript",
"bytes": "6868"
},
{
"name": "HTML",
"bytes": "35753"
},
{
"name": "Python",
"bytes": "114063"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os.path
# Package data
# ------------
# Distribution name and release version. _version is also embedded in the
# download URL below, so bump the two together when cutting a release.
_name = 'jpl.mcl.site.sciencedata'
_version = '0.0.7'
_description = 'Science data for the MCL site'
_url = 'https://github.com/MCLConsortium/' + _name
_downloadURL = 'https://github.com/MCLConsortium/' + _name + '/archive/' + _version + '.zip'
_author = 'Sean Kelly'
_authorEmail = 'sean.kelly@jpl.nasa.gov'
_maintainer = 'Sean Kelly'
_maintainerEmail = 'sean.kelly@jpl.nasa.gov'
_license = 'Proprietary'
# Old-style setuptools namespace packages this egg participates in.
_namespaces = ['jpl', 'jpl.mcl', 'jpl.mcl.site']
_zipSafe = False
_keywords = 'plone zope science data mcl'
_testSuite = 'jpl.mcl.site.sciencedata.tests.test_suite'
# Extra requirements installed only for the [test] extra.
_extras = {
    'test': ['plone.app.testing'],
}
# Registers the package with z3c.autoinclude so Plone loads its ZCML.
_entryPoints = {
    'z3c.autoinclude.plugin': ['target=plone'],
}
_requirements = [
    'setuptools',
    'eea.facetednavigation',
    'jpl.mcl.site.knowledge',
    'mysolr',
    'plone.api',
    'plone.app.collection',
    'plone.app.dexterity [grok, relations]',
    'plone.app.imaging',
    'plone.app.registry',
    'plone.app.relationfield',
    'plone.behavior',
    'plone.formwidget.contenttree',
    'Products.Archetypes',
    'Products.ATContentTypes',
    'Products.CMFPlacefulWorkflow',
    'Products.CMFPlone',
    'rdflib',
    'z3c.relationfield',
]
_classifiers = [
    'Development Status :: 2 - Pre-Alpha',
    'Environment :: Web Environment',
    'Framework :: Plone',
    'License :: Other/Proprietary License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Software Development :: Libraries :: Python Modules',
]
# Setup Metadata
# --------------
#
# Nothing below here should require updating.
def _read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
# Assemble the long description: a reST title banner followed by the README,
# install notes, and changelog.
_header = '*' * len(_name) + '\n' + _name + '\n' + '*' * len(_name)
_longDescription = _header + '\n\n' + _read('README.rst') + '\n\n' + _read('docs', 'INSTALL.txt') + '\n\n' \
    + _read('docs', 'HISTORY.txt')
# Write a preview copy for inspection. BUGFIX: the original
# open('doc.txt', 'w').write(...) never closed the handle, which can leave
# the file unflushed on interpreters without refcounting.
with open('doc.txt', 'w') as _doc_file:
    _doc_file.write(_longDescription)
# Hand everything to setuptools; all values are assembled above, so nothing
# in this call should need editing for a routine release.
setup(
    author=_author,
    author_email=_authorEmail,
    classifiers=_classifiers,
    description=_description,
    download_url=_downloadURL,
    entry_points=_entryPoints,
    extras_require=_extras,
    include_package_data=True,
    install_requires=_requirements,
    keywords=_keywords,
    license=_license,
    long_description=_longDescription,
    maintainer=_maintainer,
    maintainer_email=_maintainerEmail,
    name=_name,
    namespace_packages=_namespaces,
    packages=find_packages('src', exclude=['ez_setup', 'bootstrap']),
    package_dir={'': 'src'},
    test_suite=_testSuite,
    url=_url,
    version=_version,
    zip_safe=_zipSafe,
)
| {
"content_hash": "83d0e03662d9cf8bc97c6ebe2cfc2c25",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 108,
"avg_line_length": 30.0531914893617,
"alnum_prop": 0.6230088495575221,
"repo_name": "MCLConsortium/mcl-site",
"id": "135f70918bbac9925de809b96f322162ff0f1e3d",
"size": "2844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jpl.mcl.site.sciencedata/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55290"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
from django.core.paginator import Paginator
def paginate(queue, per_page):
    """Yield successive Page objects covering *queue*.

    Wraps *queue* in a ``Paginator`` with at most *per_page* items per page
    and generates every page in order, so callers can stream over large
    result sets one page at a time.
    """
    pages = Paginator(queue, per_page)
    for number in pages.page_range:
        yield pages.page(number)
| {
"content_hash": "b2e7348eae9a3cc823d70dcce4123965",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.7225130890052356,
"repo_name": "akabos/python-django-djapian",
"id": "b3bc49db06dcede57b07f500af1b8e9d9c1586d0",
"size": "191",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/djapian/utils/paging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60389"
}
],
"symlink_target": ""
} |
import time
from twitter.common import log
from twitter.common.quantity import Amount, Time
from apache.aurora.admin.admin_util import format_sla_results, print_results
from apache.aurora.client.api import AuroraClientAPI
from apache.aurora.client.base import check_and_log_response, DEFAULT_GROUPING, group_hosts
from gen.apache.aurora.api.ttypes import Hosts, MaintenanceMode
class HostMaintenance(object):
"""Submit requests to the scheduler to put hosts into and out of maintenance
mode so they can be operated upon without causing LOST tasks.
Aurora provides a two-tiered concept of Maintenance. The first step is to initiate maintenance,
which will ask the Aurora scheduler to de-prioritize scheduling on a large set of hosts (the ones
that will be operated upon during this maintenance window). Once all hosts have been tagged in
this manner, the operator can begin draining individual machines, which will have all user-tasks
killed and rescheduled. When the tasks get placed onto a new machine, the scheduler will first
look for hosts that do not have the maintenance tag, which will help decrease churn and prevent a
task from being constantly killed as its hosts go down from underneath it.
"""
START_MAINTENANCE_DELAY = Amount(30, Time.SECONDS)
SLA_MIN_JOB_INSTANCE_COUNT = 20
SLA_UPTIME_PERCENTAGE_LIMIT = 95
SLA_UPTIME_DURATION_LIMIT = Amount(30, Time.MINUTES)
@classmethod
def iter_batches(cls, hostnames, grouping_function=DEFAULT_GROUPING):
groups = group_hosts(hostnames, grouping_function)
groups = sorted(groups.items(), key=lambda v: v[0])
for group in groups:
yield Hosts(group[1])
  def __init__(self, cluster, verbosity):
    # Only the literal 'verbose' level turns on verbose API client logging.
    self._client = AuroraClientAPI(cluster, verbosity == 'verbose')
  def _drain_hosts(self, drainable_hosts, clock=time):
    """Drains tasks from the specified hosts.

    This will move active tasks on these hosts to the DRAINING state, causing them to be
    rescheduled elsewhere.

    :param drainable_hosts: Hosts that are in maintenance mode and ready to be drained
    :type drainable_hosts: gen.apache.aurora.ttypes.Hosts
    :param clock: time module for testing
    :type clock: time
    """
    check_and_log_response(self._client.drain_hosts(drainable_hosts))
    # Poll (with a fixed sleep between rounds) until every host reports
    # DRAINED; each round re-queries only the hosts still pending.
    not_ready_hostnames = [hostname for hostname in drainable_hosts.hostNames]
    while not_ready_hostnames:
      log.info("Sleeping for %s." % self.START_MAINTENANCE_DELAY)
      clock.sleep(self.START_MAINTENANCE_DELAY.as_(Time.SECONDS))
      resp = self._client.maintenance_status(Hosts(set(not_ready_hostnames)))
      if not resp.result.maintenanceStatusResult.statuses:
        # No statuses returned: nothing left to wait on, terminate the loop.
        not_ready_hostnames = None
      for host_status in resp.result.maintenanceStatusResult.statuses:
        if host_status.mode != MaintenanceMode.DRAINED:
          log.warning('%s is currently in status %s' %
              (host_status.host, MaintenanceMode._VALUES_TO_NAMES[host_status.mode]))
        else:
          not_ready_hostnames.remove(host_status.host)
def _complete_maintenance(self, drained_hosts):
"""End the maintenance status for a given set of hosts.
:param drained_hosts: Hosts that are drained and finished being operated upon
:type drained_hosts: gen.apache.aurora.ttypes.Hosts
"""
check_and_log_response(self._client.end_maintenance(drained_hosts))
resp = self._client.maintenance_status(drained_hosts)
for host_status in resp.result.maintenanceStatusResult.statuses:
if host_status.mode != MaintenanceMode.NONE:
log.warning('%s is DRAINING or in DRAINED' % host_status.host)
def _operate_on_hosts(self, drained_hosts, callback):
"""Perform a given operation on a list of hosts that are ready for maintenance.
:param drained_hosts: Hosts that have been drained (via _drain_hosts)
:type drained_hosts: gen.apache.aurora.ttypes.Hosts
:param callback: Function to call one hostname at a time
:type callback: function
"""
for hostname in drained_hosts.hostNames:
callback(hostname)
  def _check_sla(self, hostnames, grouping_function, percentage=None, duration=None):
    """Check if the provided list of hosts passes the job uptime SLA check.

    This is an all-or-nothing check, meaning that all provided hosts must pass their job
    SLA check for the maintenance to proceed.

    :param hostnames: list of host names to check SLA for
    :type hostnames: list of strings
    :param grouping_function: grouping function to apply to the given hosts
    :type grouping_function: function
    :param percentage: SLA uptime percentage override
    :type percentage: float
    :param duration: SLA uptime duration override
    :type duration: twitter.common.quantity.Amount
    :rtype: set of unsafe hosts
    """
    # Class-level defaults apply unless the caller overrides them.
    sla_percentage = percentage or self.SLA_UPTIME_PERCENTAGE_LIMIT
    sla_duration = duration or self.SLA_UPTIME_DURATION_LIMIT
    vector = self._client.sla_get_safe_domain_vector(self.SLA_MIN_JOB_INSTANCE_COUNT, hostnames)
    host_groups = vector.probe_hosts(
      sla_percentage,
      sla_duration.as_(Time.SECONDS),
      grouping_function)
    unsafe_hostnames = set()
    # Given that maintenance is performed 1 group at a time, any result longer than 1 group
    # should be considered a batch failure.
    if host_groups:
      if len(host_groups) > 1:
        log.error('Illegal multiple groups detected in SLA results. Skipping hosts: %s' % hostnames)
        return set(hostnames)
      results, unsafe_hostnames = format_sla_results(host_groups, unsafe_only=True)
      if results:
        print_results(results)
        return unsafe_hostnames
    # Empty set here means every probed host passed its SLA check.
    return unsafe_hostnames
def end_maintenance(self, hostnames):
"""Pull a list of hostnames out of maintenance mode.
:param hostnames: List of hosts to operate upon
:type hostnames: list of strings
"""
self._complete_maintenance(Hosts(set(hostnames)))
  def start_maintenance(self, hostnames):
    """Put a list of hostnames into maintenance mode, to de-prioritize scheduling.

    This is part of two-phase draining- tasks will still be running on these hosts until
    drain_hosts is called upon them.

    :param hostnames: List of hosts to set for initial maintenance
    :type hostnames: list of strings
    :rtype: list of hostnames with the maintenance mode set
    """
    resp = self._client.start_maintenance(Hosts(set(hostnames)))
    check_and_log_response(resp)
    result = [host_status.host for host_status in resp.result.startMaintenanceResult.statuses]
    # The scheduler only acknowledges hosts it knows; log and drop the rest.
    if len(result) != len(hostnames):
      log.warning('Skipping maintenance for unknown hosts: %s' % (set(hostnames) - set(result)))
    return result
def perform_maintenance(self, hostnames, grouping_function=DEFAULT_GROUPING,
                        callback=None, percentage=None, duration=None, output_file=None):
  """Wrap a callback in between sending hosts into maintenance mode and back.

  Walk through the process of putting hosts into maintenance, draining them of tasks,
  performing an action on them once drained, then removing them from maintenance mode
  so tasks can schedule.

  :param hostnames: A list of hostnames to operate upon
  :type hostnames: list of strings
  :param grouping_function: How to split up the hostname into groups
  :type grouping_function: function
  :param callback: Function to call once hosts are drained
  :type callback: function
  :param percentage: SLA percentage to use
  :type percentage: float
  :param duration: SLA duration to use
  :type duration: twitter.common.quantity.Time
  :param output_file: file to write hosts that were not drained due to failed SLA check
  :type output_file: string
  """
  # start_maintenance returns only hosts the scheduler knows about, so
  # unknown hosts drop out of the run here.
  hostnames = self.start_maintenance(hostnames)
  not_drained_hostnames = set()
  for hosts in self.iter_batches(hostnames, grouping_function):
    log.info('Beginning SLA check for %s' % hosts.hostNames)
    unsafe_hostnames = self._check_sla(
        list(hosts.hostNames),
        grouping_function,
        percentage,
        duration)
    if unsafe_hostnames:
      log.warning('Some hosts did not pass SLA check and will not be drained! '
                  'Skipping hosts: %s' % unsafe_hostnames)
      # Unsafe hosts are immediately returned to normal scheduling and
      # recorded for the end-of-run report.
      self._complete_maintenance(Hosts(unsafe_hostnames))
      not_drained_hostnames |= unsafe_hostnames
      # NOTE(review): set difference implies hosts.hostNames is a set — confirm.
      drainable_hostnames = hosts.hostNames - unsafe_hostnames
      if not drainable_hostnames:
        continue
      hosts = Hosts(drainable_hostnames)
    else:
      log.info('All hosts passed SLA check.')
    # Drain, run the caller's action (if any), then restore the batch.
    self._drain_hosts(hosts)
    if callback:
      self._operate_on_hosts(hosts, callback)
    self._complete_maintenance(hosts)
  if not_drained_hostnames:
    output = '\n'.join(list(not_drained_hostnames))
    log.info('The following hosts did not pass SLA check and were not drained:')
    print(output)
    if output_file:
      try:
        with open(output_file, 'w') as fp:
          fp.write(output)
          fp.write('\n')
        log.info('Written unsafe host names into: %s' % output_file)
      except IOError as e:
        # Best-effort: failure to write the report does not fail the run.
        log.error('Failed to write into the output file: %s' % e)
def check_status(self, hostnames):
  """Query the scheduler to determine the maintenance status for a list of hostnames

  :param hostnames: Hosts to query for
  :type hostnames: list of strings
  :rtype: list of 2-tuples, hostname and MaintenanceMode
  """
  response = self._client.maintenance_status(Hosts(set(hostnames)))
  check_and_log_response(response)
  return [(status.host, MaintenanceMode._VALUES_TO_NAMES[status.mode])
          for status in response.result.maintenanceStatusResult.statuses]
| {
"content_hash": "e5cd9a118f5137a3052b7ba1de734c99",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 100,
"avg_line_length": 42.293103448275865,
"alnum_prop": 0.7100489196901753,
"repo_name": "mkhutornenko/incubator-aurora",
"id": "162f9afea67e409d784e436c96f55ceed0537ca1",
"size": "10361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/apache/aurora/admin/host_maintenance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2391"
},
{
"name": "Groovy",
"bytes": "15108"
},
{
"name": "Java",
"bytes": "1620408"
},
{
"name": "JavaScript",
"bytes": "71008"
},
{
"name": "Python",
"bytes": "1327488"
},
{
"name": "Ruby",
"bytes": "4252"
},
{
"name": "Shell",
"bytes": "53305"
}
],
"symlink_target": ""
} |
from .version import VERSION, RELEASE
| {
"content_hash": "6c416b554aa060fc2acd18d231dc52da",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 37,
"avg_line_length": 19.5,
"alnum_prop": 0.7948717948717948,
"repo_name": "tartley/genesis",
"id": "972cd83bc75e5b8cf653569460ce2d53d4aa6a3f",
"size": "40",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "genesis_src/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "190"
},
{
"name": "Makefile",
"bytes": "2376"
},
{
"name": "Python",
"bytes": "18306"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
} |
from .profile import * # noqa
| {
"content_hash": "28fcdbf1af5ff02322ec7f431f8494bf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.6774193548387096,
"repo_name": "ecobasa/ecobasa",
"id": "2cb0d9ccdc9e4caa59812488ff3db359c897e813",
"size": "56",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecobasa/models/serializers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10161"
},
{
"name": "Dockerfile",
"bytes": "799"
},
{
"name": "HTML",
"bytes": "363806"
},
{
"name": "JavaScript",
"bytes": "24440"
},
{
"name": "Less",
"bytes": "1595"
},
{
"name": "Python",
"bytes": "877232"
},
{
"name": "SCSS",
"bytes": "42435"
}
],
"symlink_target": ""
} |
"""
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import TextInput, PasswordInput, HiddenInput, MultipleHiddenInput, \
ClearableFileInput, CheckboxInput, Select, NullBooleanSelect, SelectMultiple, \
DateInput, DateTimeInput, TimeInput, SplitDateTimeWidget, SplitHiddenDateTimeWidget, \
FILE_INPUT_CONTRADICTION
# Public names exported by this module. 'TimeField' was previously listed
# twice; each name now appears exactly once (harmless to import *, but the
# duplicate was redundant).
__all__ = (
    'Field', 'CharField', 'IntegerField',
    'DEFAULT_DATE_INPUT_FORMATS', 'DateField',
    'DEFAULT_TIME_INPUT_FORMATS', 'TimeField',
    'DEFAULT_DATETIME_INPUT_FORMATS', 'DateTimeField',
    'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
    'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
    'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
    'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
    'TypedChoiceField'
)
def en_format(name):
    """Return the English-locale format constant *name*.

    Helper kept only for backward compatibility; accessing it warns that the
    module-level DEFAULT_* constants are deprecated.
    """
    from django.conf.locale.en import formats
    message = (
        "`django.forms.fields.DEFAULT_%s` is deprecated; "
        "use `django.utils.formats.get_format('%s')` instead." % (name, name)
    )
    warnings.warn(message, DeprecationWarning)
    return getattr(formats, name)
# Deprecated module-level aliases, wrapped in django.utils.functional.lazy so
# en_format's DeprecationWarning presumably only fires when the value is
# actually used rather than at import time — confirm against lazy() semantics.
DEFAULT_DATE_INPUT_FORMATS = lazy(lambda: en_format('DATE_INPUT_FORMATS'), tuple, list)()
DEFAULT_TIME_INPUT_FORMATS = lazy(lambda: en_format('TIME_INPUT_FORMATS'), tuple, list)()
DEFAULT_DATETIME_INPUT_FORMATS = lazy(lambda: en_format('DATETIME_INPUT_FORMATS'), tuple, list)()
class Field(object):
    """Base class for all form fields.

    Holds the widget used for rendering, the validator list, and the merged
    error-message dict; clean() ties to_python/validate/run_validators together.
    """
    widget = TextInput  # Default widget to use when rendering this type of Field.
    hidden_widget = HiddenInput  # Default widget to use when rendering this as "hidden".
    default_validators = []  # Default set of validators
    default_error_messages = {
        'required': _(u'This field is required.'),
        'invalid': _(u'Enter a valid value.'),
    }
    # Tracks each time a Field instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text=None, error_messages=None, show_hidden_initial=False,
                 validators=[], localize=False):
        # required -- Boolean that specifies whether the field is required.
        #             True by default.
        # widget -- A Widget class, or instance of a Widget class, that should
        #           be used for this Field when displaying it. Each Field has a
        #           default Widget that it'll use if you don't specify this. In
        #           most cases, the default widget is TextInput.
        # label -- A verbose name for this field, for use in displaying this
        #          field in a form. By default, Django will use a "pretty"
        #          version of the form field name, if the Field is part of a
        #          Form.
        # initial -- A value to use in this Field's initial display. This value
        #            is *not* used as a fallback if data isn't given.
        # help_text -- An optional string to use as "help text" for this Field.
        # error_messages -- An optional dictionary to override the default
        #                   messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies if it is needed to render a
        #                        hidden widget with initial value after widget.
        # validators -- List of additional validators to use
        # localize -- Boolean that specifies if the field should be localized.
        if label is not None:
            label = smart_unicode(label)
        self.required, self.label, self.initial = required, label, initial
        self.show_hidden_initial = show_hidden_initial
        if help_text is None:
            self.help_text = u''
        else:
            self.help_text = smart_unicode(help_text)
        widget = widget or self.widget
        # A class (rather than an instance) is instantiated with defaults.
        if isinstance(widget, type):
            widget = widget()
        # Trigger the localization machinery if needed.
        self.localize = localize
        if self.localize:
            widget.is_localized = True
        # Let the widget know whether it should display as required.
        widget.is_required = self.required
        # Hook into self.widget_attrs() for any Field-specific HTML attributes.
        extra_attrs = self.widget_attrs(widget)
        if extra_attrs:
            widget.attrs.update(extra_attrs)
        self.widget = widget
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
        # Walk the MRO base-first so subclass default_error_messages override
        # base-class ones, then apply the per-instance overrides last.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
        # The mutable [] default is safe here: it is only read, and the
        # concatenation builds a fresh list per instance.
        self.validators = self.default_validators + validators
    def prepare_value(self, value):
        # Hook for subclasses to adjust the value before rendering.
        return value
    def to_python(self, value):
        # Hook: coerce the raw submitted value to the proper Python type.
        return value
    def validate(self, value):
        # Only the 'required' check lives here; everything else is validators.
        if value in validators.EMPTY_VALUES and self.required:
            raise ValidationError(self.error_messages['required'])
    def run_validators(self, value):
        # Empty values are exempt; validate() already handles 'required'.
        if value in validators.EMPTY_VALUES:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except ValidationError, e:
                # Prefer a field-specific message keyed by the error code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            # All validator failures are reported together.
            raise ValidationError(errors)
    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.

        Raises ValidationError for any errors.
        """
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        return value
    def bound_data(self, data, initial):
        """
        Return the value that should be shown for this field on render of a
        bound form, given the submitted POST data for the field and the initial
        data, if any.

        For most fields, this will simply be data; FileFields need to handle it
        a bit differently.
        """
        return data
    def widget_attrs(self, widget):
        """
        Given a Widget instance (*not* a Widget class), returns a dictionary of
        any HTML attributes that should be added to the Widget, based on this
        Field.
        """
        return {}
    def __deepcopy__(self, memo):
        # Copy the field shallowly but deep-copy the widget, so per-copy
        # widget attrs don't leak between form instances.
        result = copy.copy(self)
        memo[id(self)] = result
        result.widget = copy.deepcopy(self.widget, memo)
        return result
class CharField(Field):
    """A text field; enforces optional min/max length via validators."""
    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        self.max_length, self.min_length = max_length, min_length
        super(CharField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.validators.append(validators.MinLengthValidator(min_length))
        if max_length is not None:
            self.validators.append(validators.MaxLengthValidator(max_length))
    def to_python(self, value):
        "Returns a Unicode object."
        if value in validators.EMPTY_VALUES:
            return u''
        return smart_unicode(value)
    def widget_attrs(self, widget):
        """Return extra HTML attributes for text-style widgets.

        Fix: previously fell off the end and returned None implicitly when no
        max_length applied; now returns {} to honour Field.widget_attrs()'s
        documented contract. Callers only truth-test the result, so this is
        backward compatible.
        """
        if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
            # The HTML attribute is maxlength, not max_length.
            return {'maxlength': str(self.max_length)}
        return {}
class IntegerField(Field):
    """Validates that the input is a whole number, optionally bounded."""
    default_error_messages = {
        'invalid': _(u'Enter a whole number.'),
        'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
        'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
    }
    def __init__(self, max_value=None, min_value=None, *args, **kwargs):
        super(IntegerField, self).__init__(*args, **kwargs)
        # Bound checks are delegated to the shared validator classes.
        for limit, validator_class in ((max_value, validators.MaxValueValidator),
                                       (min_value, validators.MinValueValidator)):
            if limit is not None:
                self.validators.append(validator_class(limit))
    def to_python(self, value):
        """
        Validates that int() can be called on the input. Returns the result
        of int(), or None for empty values.
        """
        value = super(IntegerField, self).to_python(value)
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            # str() first so e.g. float input like 3.5 is rejected rather
            # than silently truncated.
            return int(str(value))
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'])
class FloatField(IntegerField):
    """IntegerField variant that coerces with float() instead of int()."""
    default_error_messages = {
        'invalid': _(u'Enter a number.'),
    }
    def to_python(self, value):
        """
        Validates that float() can be called on the input. Returns the result
        of float(), or None for empty values.
        """
        # Deliberately skip IntegerField.to_python (which would coerce to
        # int) and reuse Field.to_python directly.
        value = super(IntegerField, self).to_python(value)
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            return float(value)
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'])
class DecimalField(Field):
    """Validates decimal input with optional bounds and digit/precision limits."""
    default_error_messages = {
        'invalid': _(u'Enter a number.'),
        'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
        'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
        'max_digits': _('Ensure that there are no more than %s digits in total.'),
        'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
        'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
    }
    def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        # Direct base-class call (rather than super()) mirrors the original.
        Field.__init__(self, *args, **kwargs)
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))
    def to_python(self, value):
        """
        Validates that the input is a decimal number. Returns a Decimal
        instance. Returns None for empty values. Ensures that there are no more
        than max_digits in the number, and no more than decimal_places digits
        after the decimal point.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        value = smart_str(value).strip()
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'])
        return value
    def validate(self, value):
        super(DecimalField, self).validate(value)
        if value in validators.EMPTY_VALUES:
            return
        # Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
        # since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN
        if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
            raise ValidationError(self.error_messages['invalid'])
        sign, digittuple, exponent = value.as_tuple()
        decimals = abs(exponent)
        # digittuple doesn't include any leading zeros.
        digits = len(digittuple)
        if decimals > digits:
            # We have leading zeros up to or past the decimal point. Count
            # everything past the decimal point as a digit. We do not count
            # 0 before the decimal point as a digit since that would mean
            # we would not allow max_digits = decimal_places.
            digits = decimals
        whole_digits = digits - decimals
        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
        if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
            raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
        # NOTE(review): unlike Field.validate this returns the value; clean()
        # ignores the return, so it is harmless but inconsistent.
        return value
class DateField(Field):
    """Normalizes input to a datetime.date object."""
    widget = DateInput
    default_error_messages = {
        'invalid': _(u'Enter a valid date.'),
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        super(DateField, self).__init__(*args, **kwargs)
        # None means "fall back to the locale's DATE_INPUT_FORMATS".
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a date. Returns a Python
        datetime.date object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        # datetime is a date subclass, so check it first and truncate.
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        accepted_formats = self.input_formats or formats.get_format('DATE_INPUT_FORMATS')
        for date_format in accepted_formats:
            try:
                time_struct = time.strptime(value, date_format)
                return datetime.date(*time_struct[:3])
            except ValueError:
                continue
        raise ValidationError(self.error_messages['invalid'])
class TimeField(Field):
    """Normalizes input to a datetime.time object."""
    widget = TimeInput
    default_error_messages = {
        'invalid': _(u'Enter a valid time.')
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        super(TimeField, self).__init__(*args, **kwargs)
        # None means "fall back to the locale's TIME_INPUT_FORMATS".
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a time. Returns a Python
        datetime.time object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if isinstance(value, datetime.time):
            return value
        accepted_formats = self.input_formats or formats.get_format('TIME_INPUT_FORMATS')
        for time_format in accepted_formats:
            try:
                # Fields 3-5 of the struct_time are hour, minute, second.
                time_struct = time.strptime(value, time_format)
                return datetime.time(*time_struct[3:6])
            except ValueError:
                continue
        raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
    """Normalizes input to a datetime.datetime object."""
    widget = DateTimeInput
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time.'),
    }
    def __init__(self, input_formats=None, *args, **kwargs):
        # input_formats: optional iterable of strptime() formats; None falls
        # back to the locale's DATETIME_INPUT_FORMATS at parse time.
        super(DateTimeField, self).__init__(*args, **kwargs)
        self.input_formats = input_formats
    def to_python(self, value):
        """
        Validates that the input can be converted to a datetime. Returns a
        Python datetime.datetime object.
        """
        if value in validators.EMPTY_VALUES:
            return None
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to a datetime at midnight.
            return datetime.datetime(value.year, value.month, value.day)
        if isinstance(value, list):
            # Input comes from a SplitDateTimeWidget, for example. So, it's two
            # components: date and time.
            if len(value) != 2:
                raise ValidationError(self.error_messages['invalid'])
            if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
                return None
            # Join the two halves and fall through to string parsing below.
            value = '%s %s' % tuple(value)
        for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
            try:
                return datetime.datetime(*time.strptime(value, format)[:6])
            except ValueError:
                continue
        raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
    """CharField whose value must match a given regular expression."""
    def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
        """
        regex can be either a string or a compiled regular expression object.
        error_message is an optional error message to use, if
        'Enter a valid value' is too generic for you.
        """
        # error_message is just kept for backwards compatibility:
        if error_message:
            # NOTE(review): when the caller passed error_messages, this
            # mutates that dict in place — confirm callers don't share it.
            error_messages = kwargs.get('error_messages') or {}
            error_messages['invalid'] = error_message
            kwargs['error_messages'] = error_messages
        super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
        # Accept both a pattern string and a pre-compiled pattern object.
        if isinstance(regex, basestring):
            regex = re.compile(regex)
        self.regex = regex
        self.validators.append(validators.RegexValidator(regex=regex))
class EmailField(CharField):
    """CharField that validates the value as an e-mail address."""
    default_error_messages = {
        'invalid': _(u'Enter a valid e-mail address.'),
    }
    default_validators = [validators.validate_email]
    def clean(self, value):
        # Strip surrounding whitespace before the usual CharField cleaning.
        stripped = self.to_python(value).strip()
        return super(EmailField, self).clean(stripped)
class FileField(Field):
    """Handles uploaded-file data, including the widget's "clear" checkbox."""
    widget = ClearableFileInput
    default_error_messages = {
        'invalid': _(u"No file was submitted. Check the encoding type on the form."),
        'missing': _(u"No file was submitted."),
        'empty': _(u"The submitted file is empty."),
        'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
        'contradiction': _(u'Please either submit a file or check the clear checkbox, not both.')
    }
    def __init__(self, *args, **kwargs):
        # max_length limits the *filename* length, not the file size.
        self.max_length = kwargs.pop('max_length', None)
        super(FileField, self).__init__(*args, **kwargs)
    def to_python(self, data):
        """Validate the uploaded file object and return it unchanged."""
        if data in validators.EMPTY_VALUES:
            return None
        # UploadedFile objects should have name and size attributes.
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            raise ValidationError(self.error_messages['invalid'])
        if self.max_length is not None and len(file_name) > self.max_length:
            error_values = {'max': self.max_length, 'length': len(file_name)}
            raise ValidationError(self.error_messages['max_length'] % error_values)
        if not file_name:
            raise ValidationError(self.error_messages['invalid'])
        if not file_size:
            raise ValidationError(self.error_messages['empty'])
        return data
    def clean(self, data, initial=None):
        """Clean the upload, honouring the widget's clear-checkbox protocol."""
        # If the widget got contradictory inputs, we raise a validation error
        if data is FILE_INPUT_CONTRADICTION:
            raise ValidationError(self.error_messages['contradiction'])
        # False means the field value should be cleared; further validation is
        # not needed.
        if data is False:
            if not self.required:
                return False
            # If the field is required, clearing is not possible (the widget
            # shouldn't return False data in that case anyway). False is not
            # in validators.EMPTY_VALUES; if a False value makes it this far
            # it should be validated from here on out as None (so it will be
            # caught by the required check).
            data = None
        if not data and initial:
            return initial
        return super(FileField, self).clean(data)
    def bound_data(self, data, initial):
        # Uploads can't be redisplayed; keep showing the initial value unless
        # genuinely new data arrived.
        if data in (None, FILE_INPUT_CONTRADICTION):
            return initial
        return data
class ImageField(FileField):
    """FileField that additionally verifies the upload is a readable image."""
    default_error_messages = {
        'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
    }
    def to_python(self, data):
        """
        Checks that the file-upload field data contains a valid image (GIF, JPG,
        PNG, possibly others -- whatever the Python Imaging Library supports).
        """
        f = super(ImageField, self).to_python(data)
        if f is None:
            return None
        # Try to import PIL in either of the two ways it can end up installed.
        try:
            from PIL import Image
        except ImportError:
            import Image
        # We need to get a file object for PIL. We might have a path or we might
        # have to read the data into memory.
        if hasattr(data, 'temporary_file_path'):
            file = data.temporary_file_path()
        else:
            if hasattr(data, 'read'):
                file = StringIO(data.read())
            else:
                file = StringIO(data['content'])
        # The open/load/reset/verify sequence below is order-critical; see the
        # inline comments before changing it.
        try:
            # load() is the only method that can spot a truncated JPEG,
            # but it cannot be called sanely after verify()
            trial_image = Image.open(file)
            trial_image.load()
            # Since we're about to use the file again we have to reset the
            # file object if possible.
            if hasattr(file, 'reset'):
                file.reset()
            # verify() is the only method that can spot a corrupt PNG,
            # but it must be called immediately after the constructor
            trial_image = Image.open(file)
            trial_image.verify()
        except ImportError:
            # Under PyPy, it is possible to import PIL. However, the underlying
            # _imaging C module isn't available, so an ImportError will be
            # raised. Catch and re-raise.
            raise
        except Exception: # Python Imaging Library doesn't recognize it as an image
            raise ValidationError(self.error_messages['invalid_image'])
        if hasattr(f, 'seek') and callable(f.seek):
            # Rewind so later consumers (e.g. storage backends) read from 0.
            f.seek(0)
        return f
class URLField(CharField):
    """CharField that normalizes and validates URLs."""
    default_error_messages = {
        'invalid': _(u'Enter a valid URL.'),
        'invalid_link': _(u'This URL appears to be a broken link.'),
    }
    def __init__(self, max_length=None, min_length=None, verify_exists=False,
                 validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
        # verify_exists makes the validator check the URL actually resolves;
        # validator_user_agent is the User-Agent it uses for that check.
        super(URLField, self).__init__(max_length, min_length, *args,
                                       **kwargs)
        self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
    def to_python(self, value):
        """Normalize the value to a full URL (scheme and path filled in)."""
        if value:
            if '://' not in value:
                # If no URL scheme given, assume http://
                value = u'http://%s' % value
            url_fields = list(urlparse.urlsplit(value))
            if not url_fields[2]:
                # the path portion may need to be added before query params
                url_fields[2] = '/'
            value = urlparse.urlunsplit(url_fields)
        return super(URLField, self).to_python(value)
class BooleanField(Field):
    """Checkbox field: normalizes submitted data to a real boolean."""
    widget = CheckboxInput
    def to_python(self, value):
        """Returns a Python boolean object."""
        # A hidden input submits the string 'False' and RadioSelect submits
        # '0' for an unchecked value; everything else follows normal Python
        # truthiness (bool('True') == bool('1') == True already).
        coerced = False if value in ('False', '0') else bool(value)
        coerced = super(BooleanField, self).to_python(coerced)
        # An unchecked required checkbox is an error (False is not in
        # EMPTY_VALUES, so the base required check would miss it).
        if self.required and not coerced:
            raise ValidationError(self.error_messages['required'])
        return coerced
class NullBooleanField(BooleanField):
    """
    A field whose valid values are None, True and False. Invalid values are
    cleaned to None.
    """
    widget = NullBooleanSelect
    def to_python(self, value):
        """
        Map True/'True'/'1' to True and False/'False'/'0' to False; anything
        else becomes None. Unlike BooleanField we must check the strings
        explicitly because bool() is not applied here.
        """
        if value in (True, 'True', '1'):
            return True
        if value in (False, 'False', '0'):
            return False
        return None
    def validate(self, value):
        # None is an acceptable answer, so the base required check is a no-op.
        pass
class ChoiceField(Field):
    """Field whose value must be one of a fixed set of choices."""
    widget = Select
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
    }
    def __init__(self, choices=(), required=True, widget=None, label=None,
                 initial=None, help_text=None, *args, **kwargs):
        super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
                                          initial=initial, help_text=help_text, *args, **kwargs)
        # Goes through the choices property below, which mirrors onto the widget.
        self.choices = choices
    def _get_choices(self):
        return self._choices
    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        self._choices = self.widget.choices = list(value)
    choices = property(_get_choices, _set_choices)
    def to_python(self, value):
        "Returns a Unicode object."
        if value in validators.EMPTY_VALUES:
            return u''
        return smart_unicode(value)
    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(ChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
    def valid_value(self, value):
        "Check to see if the provided value is a valid choice"
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == smart_unicode(k2):
                        return True
            else:
                if value == smart_unicode(k):
                    return True
        return False
class TypedChoiceField(ChoiceField):
    """ChoiceField that coerces the selected value to an arbitrary type."""
    def __init__(self, *args, **kwargs):
        # coerce: callable applied to the raw (unicode) choice value;
        # empty_value: what an empty selection is normalized to.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', '')
        super(TypedChoiceField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """
        Validate that the value is in self.choices and can be coerced to the
        right type.
        """
        value = super(TypedChoiceField, self).to_python(value)
        # Membership is checked here, pre-coercion, because validate() below
        # is a no-op (the coerced value may no longer appear in choices).
        super(TypedChoiceField, self).validate(value)
        if value == self.empty_value or value in validators.EMPTY_VALUES:
            return self.empty_value
        try:
            return self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
    def validate(self, value):
        pass
class MultipleChoiceField(ChoiceField):
    """ChoiceField accepting a list/tuple of selections."""
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _(u'Enter a list of values.'),
    }
    def to_python(self, value):
        # Normalize to a list of unicode strings.
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'])
        return [smart_unicode(item) for item in value]
    def validate(self, value):
        """
        Validates that the input is a list or tuple.
        """
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        # Every submitted value must be one of the configured choices.
        for chosen in value:
            if not self.valid_value(chosen):
                raise ValidationError(self.error_messages['invalid_choice'] % {'value': chosen})
class ComboField(Field):
    """
    A Field whose clean() method calls multiple Field clean() methods.
    """
    def __init__(self, fields=(), *args, **kwargs):
        super(ComboField, self).__init__(*args, **kwargs)
        # ComboField itself performs the required check, so suppress it on
        # each sub-field.
        for subfield in fields:
            subfield.required = False
        self.fields = subfield if False else fields
    def clean(self, value):
        """
        Validates the given value against all of self.fields, which is a
        list of Field instances.
        """
        super(ComboField, self).clean(value)
        # Each sub-field cleans (and may transform) the previous result.
        for subfield in self.fields:
            value = subfield.clean(value)
        return value
class MultiValueField(Field):
    """
    A Field that aggregates the logic of multiple Fields.

    Its clean() method takes a "decompressed" list of values, which are then
    cleaned into a single value according to self.fields. Each value in
    this list is cleaned by the corresponding field -- the first value is
    cleaned by the first field, the second value is cleaned by the second
    field, etc. Once all fields are cleaned, the list of clean values is
    "compressed" into a single value.

    Subclasses should not have to implement clean(). Instead, they must
    implement compress(), which takes a list of valid values and returns a
    "compressed" version of those values -- a single value.

    You'll probably want to use this with MultiWidget.
    """
    default_error_messages = {
        'invalid': _(u'Enter a list of values.'),
    }
    def __init__(self, fields=(), *args, **kwargs):
        super(MultiValueField, self).__init__(*args, **kwargs)
        # Set 'required' to False on the individual fields, because the
        # required validation will be handled by MultiValueField, not by those
        # individual fields.
        for f in fields:
            f.required = False
        self.fields = fields
    def validate(self, value):
        # The compressed value is validated at the end of clean() instead.
        pass
    def clean(self, value):
        """
        Validates every value in the given list. A value is validated against
        the corresponding Field in self.fields.

        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), clean() would call
        DateField.clean(value[0]) and TimeField.clean(value[1]).
        """
        clean_data = []
        errors = ErrorList()
        if not value or isinstance(value, (list, tuple)):
            # Entirely-empty input: required fields fail here; optional ones
            # short-circuit to compress([]).
            if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
                if self.required:
                    raise ValidationError(self.error_messages['required'])
                else:
                    return self.compress([])
        else:
            raise ValidationError(self.error_messages['invalid'])
        for i, field in enumerate(self.fields):
            # Missing trailing components are treated as None.
            try:
                field_value = value[i]
            except IndexError:
                field_value = None
            if self.required and field_value in validators.EMPTY_VALUES:
                raise ValidationError(self.error_messages['required'])
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError, e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter.
                errors.extend(e.messages)
        if errors:
            raise ValidationError(errors)
        out = self.compress(clean_data)
        self.validate(out)
        return out
    def compress(self, data_list):
        """
        Returns a single value for the given list of values. The values can be
        assumed to be valid.

        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), this might return a datetime
        object created by combining the date and time in data_list.
        """
        raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
    """A ChoiceField whose choices are filenames found under ``path``.

    ``match`` is an optional regular expression; only filenames matching it
    (via re.search on the basename) are offered. With ``recursive=True`` the
    whole tree below ``path`` is walked; otherwise only direct children of
    ``path`` are listed.
    """
    def __init__(self, path, match=None, recursive=False, required=True,
                 widget=None, label=None, initial=None, help_text=None,
                 *args, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        super(FilePathField, self).__init__(choices=(), required=required,
            widget=widget, label=label, initial=initial, help_text=help_text,
            *args, **kwargs)

        if self.required:
            self.choices = []
        else:
            # Optional fields get an explicit "no selection" entry.
            self.choices = [("", "---------")]

        if self.match is not None:
            self.match_re = re.compile(self.match)

        if recursive:
            for root, dirs, files in os.walk(self.path):
                for f in files:
                    if self.match is None or self.match_re.search(f):
                        f = os.path.join(root, f)
                        # Store the full path as the value; display the path
                        # relative to `path`.
                        self.choices.append((f, f.replace(path, "", 1)))
        else:
            try:
                for f in os.listdir(self.path):
                    full_file = os.path.join(self.path, f)
                    if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
                        self.choices.append((full_file, f))
            except OSError:
                # An unreadable or missing directory simply yields no choices.
                pass

        self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
    """A MultiValueField made of a DateField plus a TimeField.

    The two cleaned parts are compressed into one datetime.datetime.
    """
    widget = SplitDateTimeWidget
    hidden_widget = SplitHiddenDateTimeWidget
    default_error_messages = {
        'invalid_date': _(u'Enter a valid date.'),
        'invalid_time': _(u'Enter a valid time.'),
    }

    def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
        # Layer any caller-supplied error messages over the defaults so the
        # component fields report the right 'invalid' text.
        errors = self.default_error_messages.copy()
        errors.update(kwargs.get('error_messages', {}))
        localize = kwargs.get('localize', False)
        date_field = DateField(input_formats=input_date_formats,
                               error_messages={'invalid': errors['invalid_date']},
                               localize=localize)
        time_field = TimeField(input_formats=input_time_formats,
                               error_messages={'invalid': errors['invalid_time']},
                               localize=localize)
        super(SplitDateTimeField, self).__init__((date_field, time_field),
                                                 *args, **kwargs)

    def compress(self, data_list):
        """Combine the cleaned [date, time] pair into a single datetime."""
        if not data_list:
            return None
        # Raise a validation error if time or date is empty
        # (possible if SplitDateTimeField has required=False).
        if data_list[0] in validators.EMPTY_VALUES:
            raise ValidationError(self.error_messages['invalid_date'])
        if data_list[1] in validators.EMPTY_VALUES:
            raise ValidationError(self.error_messages['invalid_time'])
        return datetime.datetime.combine(*data_list)
class IPAddressField(CharField):
    """A CharField that accepts only a valid IPv4 address."""
    default_validators = [validators.validate_ipv4_address]
    default_error_messages = {
        'invalid': _(u'Enter a valid IPv4 address.'),
    }
class SlugField(CharField):
    """A CharField restricted to letters, digits, underscores and hyphens."""
    default_validators = [validators.validate_slug]
    default_error_messages = {
        'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
                     u" underscores or hyphens."),
    }
| {
"content_hash": "824d16cd752c0e8c1af79dd7ccfb755a",
"timestamp": "",
"source": "github",
"line_count": 924,
"max_line_length": 128,
"avg_line_length": 34.371212121212125,
"alnum_prop": 0.706130545672093,
"repo_name": "tjsavage/rototutor_djangononrel",
"id": "06a1618a6cc76579cc2f4da52073a9b230c8575a",
"size": "31759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/forms/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "91717"
},
{
"name": "Python",
"bytes": "4115027"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `people` app.

    Makes InternalUser.parent an optional self-referential FK and fixes the
    integer choices for MasterSection.label.
    """

    dependencies = [
        ('people', '0005_merge'),
    ]

    operations = [
        migrations.AlterField(
            model_name='internaluser',
            name='parent',
            # Optional self-referential link; deleting a parent cascades to
            # its children.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.InternalUser'),
        ),
        migrations.AlterField(
            model_name='mastersection',
            name='label',
            field=models.IntegerField(choices=[(1, 'Laundry'), (2, 'Kitchen')]),
        ),
    ]
| {
"content_hash": "a81bf96f8abcaa7f8f254533c4269792",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 130,
"avg_line_length": 28,
"alnum_prop": 0.6041666666666666,
"repo_name": "rameshgopalakrishnan/v_excel_inventory",
"id": "bd05c3148a2d5154d83b38409f324a163bd7638e",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "people/migrations/0006_auto_20161113_0906.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "212200"
},
{
"name": "HTML",
"bytes": "333938"
},
{
"name": "JavaScript",
"bytes": "317162"
},
{
"name": "Python",
"bytes": "47233"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2022, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
# Display name wafw00f reports when this WAF is detected.
NAME = 'Huawei Cloud Firewall (Huawei)'
def is_waf(self):
    """Detect Huawei Cloud WAF.

    Looks for its session cookie, its Server header value, and tell-tale
    strings in the response body. All probes are evaluated eagerly (as in
    the original code) so matcher side effects are unchanged; any single
    hit is enough to report a detection.
    """
    schemes = [
        self.matchCookie(r'^HWWAFSESID='),
        self.matchHeader(('Server', r'HuaweiCloudWAF')),
        self.matchContent(r'hwclouds\.com'),
        self.matchContent(r'hws_security@')
    ]
    # any(i for i in schemes) was a redundant generator over a list.
    return any(schemes)
"content_hash": "0eb09b1dd9726932e1d3dd2432e54338",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 24.055555555555557,
"alnum_prop": 0.6327944572748267,
"repo_name": "EnableSecurity/wafw00f",
"id": "7cb6c0f5e3e11d51847a75008253b198bb125d92",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wafw00f/plugins/huaweicloud.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "97"
},
{
"name": "Makefile",
"bytes": "339"
},
{
"name": "Python",
"bytes": "109477"
}
],
"symlink_target": ""
} |
"""Batch normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import base_layer
from REDACTED.transformer_lingvo.lingvo.core import py_utils
from REDACTED.transformer_lingvo.lingvo.core import summary_utils
from six.moves import range
from REDACTED.tensorflow.python.ops import nn # pylint:disable=g-direct-tensorflow-import
from REDACTED.tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
# Rough per-element FLOP estimate used by the FPropMeta cost accounting below.
_BN_FLOPS_PER_ELEMENT = 10
# TODO(rpang): move AddingAccumulator to a separate library.
class AddingAccumulator(base_layer.Accumulator):
  """Accumulator that keeps a running sum (batch-norm sufficient stats)."""

  def __init__(self, shape, dtype):
    super(AddingAccumulator, self).__init__()
    self.shape = shape
    self.dtype = dtype

  def DefaultValue(self):
    """Returns the zero tensor used before any update has been applied."""
    return tf.zeros(self.shape, dtype=self.dtype)

  def Update(self, value):
    """Adds `value` (cast to this accumulator's dtype) to the running sum."""
    updated = self.GetValue() + tf.cast(value, self.dtype)
    self.SetValue(updated)
class BatchNormLayer(base_layer.BaseLayer):
  """Batch normalization layer.

  Normalizes the last dimension of the input, using batch moments during
  training and moving-average moments at eval. Supports a paddings tensor
  so that padded positions do not contribute to the statistics.
  """

  @classmethod
  def Params(cls):
    """Layer hyperparams."""
    p = super(BatchNormLayer, cls).Params()
    p.Define('dim', 0, 'Depth of the input/output.')
    p.Define(
        'decay', 0.999,
        'Decay in updating the mean and variance moving average used in'
        ' batch normalization.')
    p.Define(
        'enable_cross_replica_sum_on_tpu', True,
        'If true, calls cross_replica_sum to the aggregate moving averages'
        ' across all replicas.')
    p.Define(
        'use_moving_avg_in_training', False,
        'If True, use global moving avg (mean, variance) during training'
        ' to avoid mismatch between train and eval, which then'
        ' essentially acts as an adaptive normalization step.')
    p.Define(
        'gamma_zero_init', False,
        'If True, initialize gamma to zeros according to the technique '
        'introduced in the tech report: https://arxiv.org/abs/1706.02677')
    # TODO(rpang): remove this hparam, as it is replaced
    # by p.train.ema_decay_moving_vars.
    p.Define(
        'add_stats_to_moving_average_variables', None,
        'If True, adds (mean, variance) to the MOVING_AVERAGE_VARIABLES '
        'collection to be compatible with ema_decay. '
        'Recommendation: set to True for new models, and to False to maintain '
        'checkpoint compatibility.')
    p.Define('set_padded_output_to_zero', True,
             'If True, sets the padded outputs to zero.')
    p.Define(
        'use_fused_batch_norm_for_eval', False,
        'If True, uses tf.compat.v1.nn.fused_batch_norm instead of '
        'tf.nn.batch_normalization during eval. The fused version may be more '
        'efficient but it has more restrictions on the expected input shapes.'
        'The input tensor has to be rank 4, where the first dimension '
        'corresponds to the batch, and the last dimension corresponds to the '
        'features to normalize over. This usually corresponds to NHWC with '
        'image inputs. Note that fused_batch_norm wants to track its own '
        'mean and variance during training, so we are unable to use it '
        'for training since we want to have a custom mean and variance to '
        'support padding.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(BatchNormLayer, self).__init__(params)
    p = self.params
    assert p.name

    # beta/gamma share the same shape/init spec; the effective gamma is
    # 1 + gamma unless gamma_zero_init is set.
    pc = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=[self.__class__.__name__ + '_vars'])

    with tf.variable_scope(p.name):
      if not p.use_moving_avg_in_training:
        self.CreateVariable('beta', pc)
        if p.gamma_zero_init:
          # zero initialization to BN gamma
          self.CreateVariable('gamma', pc)
        else:
          # Note, The real gamma to use is 1 + gamma.
          self.CreateVariable('gamma', pc, lambda x: 1.0 + x)

      # Two statistics.
      moving_collections = ['moving_vars', self.__class__.__name__ + '_vars']
      if p.add_stats_to_moving_average_variables:
        moving_collections += [tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
      elif p.add_stats_to_moving_average_variables is None:
        # TODO(rpang): force all models to set this param explicitly.
        tf.logging.warning(
            'BatchNormLayer.add_stats_to_moving_average_variables should be '
            'set to True for new models, and to False explicitly for '
            'checkpoint compatibility.')
      # Add to the MOVING_AVERAGE_VARIABLES collection so that they are
      # returned by tf.moving_average_variables() and included in EMA
      # variables if ema_decay is enabled.
      mva = py_utils.WeightParams(
          shape=[p.dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=moving_collections)
      self.CreateVariable(
          'moving_mean',
          mva,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN)

      mvv = py_utils.WeightParams(
          shape=[p.dim],
          init=py_utils.WeightInit.Constant(1.0),
          dtype=p.dtype,
          collections=moving_collections)
      self.CreateVariable(
          'moving_variance',
          mvv,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN)
    self._epsilon = 0.001
    self._decay = p.decay

  @property
  def epsilon(self):
    # Numerical-stability constant added to the variance.
    return self._epsilon

  @staticmethod
  def _Moments(inputs, mask, enable_cross_replica_sum_on_tpu=False):
    """Computes mean and variance over the valid data points in inputs.

    `mask` is 1.0 for valid positions and 0.0 for padded ones; the moments
    are weighted sums divided by the (possibly cross-replica) valid count.
    """
    inputs = py_utils.with_dependencies([
        py_utils.assert_equal(tf.rank(inputs), tf.rank(mask)),
        py_utils.assert_greater_equal(mask, tf.zeros_like(mask)),
    ], inputs)
    rank = tf.rank(mask)
    # Reduce over every axis except the last (feature) dimension.
    reduce_over_dims = tf.range(0, rank - 1)
    sum_v = tf.reduce_sum(inputs * tf.cast(mask, inputs.dtype),
                          reduce_over_dims)
    count_v = tf.reduce_sum(mask, reduce_over_dims)
    # Input shape is guaranteed to be a multiple of mask shape because the
    # inputs * mask op above was successfully broadcasted.
    mask_multiplier = tf.shape(inputs)[:-1] // tf.shape(mask)[:-1]
    count_v *= tf.cast(tf.reduce_prod(mask_multiplier), count_v.dtype)
    if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
      sum_v = tf.tpu.cross_replica_sum(sum_v)
      count_v = tf.tpu.cross_replica_sum(count_v)

    # Avoid division by zero when everything is padded.
    count_v = tf.maximum(count_v, 1.0)
    mean = sum_v / count_v
    sum_vv = tf.reduce_sum((inputs - mean) * (inputs - mean) * mask,
                           reduce_over_dims)

    if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
      sum_vv = tf.tpu.cross_replica_sum(sum_vv)

    variance = py_utils.with_dependencies([
        py_utils.assert_greater_equal(sum_vv, tf.zeros_like(sum_vv)),
    ], sum_vv / count_v)
    return mean, variance

  def _GetDefaultPaddings(self, inputs):
    """Gets the default paddings (all zeros, i.e. nothing padded)."""
    return tf.zeros(
        tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)

  def GetCurrentMoments(self, theta):
    """Gets the current computed moments, which should be applied at eval.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.

    Returns:
      Tuple of (mean, variance, beta, gamma).
    """
    p = self.params
    if p.use_moving_avg_in_training:
      # Moving stats already act as the normalizer; no affine transform.
      return self.vars.moving_mean, self.vars.moving_variance, 0.0, 1.0
    else:
      return (self.vars.moving_mean, self.vars.moving_variance, theta.beta,
              theta.gamma)

  def ComputeAndUpdateMoments(self, theta, inputs, paddings=None):
    """Computes moments and updates state.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].
      paddings: The paddings tensor. Shaped [..., 1], with the same rank as the
        input tensor.

    Returns:
      Tuple of (mean, variance, beta, gamma).
    """
    p = self.params
    if paddings is None:
      paddings = self._GetDefaultPaddings(inputs)
    inputs = py_utils.with_dependencies([
        py_utils.assert_shape_match([tf.shape(paddings)[-1]], [1]),
    ], inputs)
    with tf.name_scope(p.name):
      if self.do_eval:
        # The mean and variance used for normalization.
        norm_mean, norm_variance = (self.vars.moving_mean,
                                    self.vars.moving_variance)
      else:
        # Training: compute batch moments over non-padded positions and
        # fold them into the moving averages.
        mean, variance = self._Moments(inputs, 1.0 - paddings,
                                       p.enable_cross_replica_sum_on_tpu)

        py_utils.UpdateBatchNormVars(self.vars.moving_mean, mean, self._decay)
        py_utils.UpdateBatchNormVars(self.vars.moving_variance, variance,
                                     self._decay)
        # Add some summaries for visualization.
        summary_utils.histogram('%s_mean' % p.name, tf.cast(mean, tf.float32))
        summary_utils.histogram('%s_variance' % p.name,
                                tf.cast(variance, tf.float32))
        summary_utils.histogram('%s_moving_mean' % p.name,
                                tf.cast(self.vars.moving_mean, tf.float32))
        summary_utils.histogram('%s_moving_variance' % p.name,
                                tf.cast(self.vars.moving_variance, tf.float32))
        summary_utils.histogram(
            '%s_mean_diff' % p.name,
            tf.cast(mean - self.vars.moving_mean, tf.float32))
        summary_utils.histogram(
            '%s_variance_diff' % p.name,
            tf.cast(variance - self.vars.moving_variance, tf.float32))
        if p.use_moving_avg_in_training:
          # Use the global statistics for normalization.
          # Control dependencies on mean and variance make sure
          # moving_mean and variance will be updated for every training step.
          norm_mean = py_utils.with_dependencies([mean], self.vars.moving_mean)
          norm_variance = py_utils.with_dependencies([variance],
                                                     self.vars.moving_variance)
        else:
          # Use the batch statistics for normalization.
          norm_mean = mean
          norm_variance = variance

      norm_mean = py_utils.CheckNumerics(
          norm_mean, 'mean of %s failed numeric check' % p.name)
      norm_variance = py_utils.CheckNumerics(
          norm_variance, 'variance of %s failed numeric check' % p.name)

      if p.use_moving_avg_in_training:
        beta = 0.0
        gamma = 1.0
      else:
        beta = theta.beta
        gamma = theta.gamma
      return norm_mean, norm_variance, beta, gamma

  def FProp(self, theta, inputs, paddings=None):
    """Apply batch normalization.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].
      paddings: The paddings tensor. Shaped [..., 1], with the same rank as the
        input tensor.

    Returns:
      Output after applying batch normalization, with the same shape as
      'inputs'.
    """
    p = self.params
    if paddings is None:
      paddings = self._GetDefaultPaddings(inputs)
    with tf.name_scope(p.name):
      norm_mean, norm_variance, beta, gamma = self.ComputeAndUpdateMoments(
          theta, inputs, paddings)
      with tf.control_dependencies([
          py_utils.assert_greater_equal(norm_variance,
                                        tf.zeros_like(norm_variance)),
          py_utils.assert_shape_match([tf.shape(inputs)[-1]],
                                      tf.shape(norm_mean)),
          py_utils.assert_shape_match([tf.shape(inputs)[-1]],
                                      tf.shape(norm_variance)),
      ]):
        if p.use_fused_batch_norm_for_eval and self.do_eval:
          # Fused kernel requires rank-4 NHWC input; see the param docstring.
          bn_output, _, _ = nn.fused_batch_norm(
              inputs,
              gamma,
              beta,
              norm_mean,
              norm_variance,
              self._epsilon,
              is_training=False)
        else:
          bn_output = tf.nn.batch_normalization(inputs, norm_mean,
                                                norm_variance, beta, gamma,
                                                self._epsilon)
        if p.set_padded_output_to_zero:
          # Zero out padded positions so they carry no signal downstream.
          bn_output *= 1.0 - paddings
      return bn_output

  @classmethod
  def FPropMeta(cls, p, inputs, padding=None):
    """Returns metadata about the `FProp` computation for this layer."""
    py_utils.CheckShapes((inputs,))
    return py_utils.NestedMap(
        flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
        out_shapes=(inputs,))
class BatchNormLayerNoPadding(base_layer.BaseLayer):
  """Batchnorm layer without padding.

  Accumulates sufficient statistics across micro-batches via accumulators
  and applies the moving-average update once per training step in
  PostTrainingStepUpdate. Supports distributed (cross-replica) batch norm
  on TPU via bn_group_size.
  """

  @classmethod
  def Params(cls):
    """Parameters for BatchNormLayerNoPadding."""
    p = super(BatchNormLayerNoPadding, cls).Params()
    p.Define('dim', 0, 'Depth of the input/output.')
    p.Define(
        'decay', 0.997,
        'Decay in updating the mean and variance moving average used in'
        ' batch normalization.')
    p.Define('epsilon', 0.001,
             'Small float added to variance to avoid dividing by zero.')
    p.Define(
        'bn_group_size', 1,
        'The number of shards participating in normalization when distributed'
        ' batchnorm is used. Only used for TPU.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(BatchNormLayerNoPadding, self).__init__(params)
    p = self.params
    assert p.name, 'Name of BatchNormLayerNoPadding is not set.'
    p.fprop_dtype = None

    # Skip L-P regularization for these variables.
    collections = [
        self.__class__.__name__ + '_vars', py_utils.SKIP_LP_REGULARIZATION
    ]
    pc = py_utils.WeightParams(
        shape=[p.dim],
        init=py_utils.WeightInit.Constant(0.0),
        dtype=p.dtype,
        collections=collections)

    with tf.variable_scope(p.name):
      self.CreateVariable('beta', pc)
      # Note, The real gamma to use is 1 + gamma.
      self.CreateVariable('gamma', pc, lambda x: 1.0 + x)

      moving_collections = [
          'moving_vars', tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
          self.__class__.__name__ + '_vars'
      ]
      mva = py_utils.WeightParams(
          shape=[p.dim],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=p.dtype,
          collections=moving_collections)
      # Two statistics computed from sufficient stats.
      self.CreateVariable('moving_mean', mva, trainable=False)
      mvv = py_utils.WeightParams(
          shape=[p.dim],
          init=py_utils.WeightInit.Constant(1.0),
          dtype=p.dtype,
          collections=moving_collections)
      self.CreateVariable('moving_variance', mvv, trainable=False)

    # Accumulate bn sufficient stats over micro-batches.
    dim = self.vars.beta.shape[0]
    self.RegisterAccumulator('counts', AddingAccumulator([], p.dtype))
    self.RegisterAccumulator('mean_ss', AddingAccumulator([dim], p.dtype))
    self.RegisterAccumulator('variance_ss', AddingAccumulator([dim], p.dtype))

  def PostTrainingStepUpdate(self, global_step):
    """Updates moving_mean, moving_variance after each training step."""
    p = self.params
    # Get sufficient stats that accumulates over microbatches.
    counts = self.accumulators.counts.GetValue()
    mean_ss = self.accumulators.mean_ss.GetValue()
    variance_ss = self.accumulators.variance_ss.GetValue()
    # Compute batch mean and batch variance from sufficient stats
    mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
    decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)
    # Update moving_mean, moving_variance from batch mean and batch variance.
    with tf.name_scope(p.name) as scope:
      with tf.ops.colocate_with(self.vars.moving_mean):
        mean_update = tf.assign_sub(
            self.vars.moving_mean,
            # The tf.where guards against an all-empty step (counts == 0),
            # in which case the moving stats are left unchanged.
            tf.where(
                tf.greater(counts, 0.5),
                (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,
                tf.zeros_like(self.vars.moving_mean)),
            name='moving_mean_update')
      with tf.ops.colocate_with(self.vars.moving_variance):
        var_update = tf.assign_sub(
            self.vars.moving_variance,
            tf.where(
                tf.greater(counts, 0.5),
                (self.vars.moving_variance - tf.cast(variance, p.dtype)) *
                decay, tf.zeros_like(self.vars.moving_variance)),
            name='moving_variance_update')
      py_utils.CheckNumerics(
          self.vars.moving_mean,
          'moving mean of {} failed numeric check'.format(scope))
      py_utils.CheckNumerics(
          self.vars.moving_variance,
          'moving variance of {} failed numeric check'.format(scope))
    # Reset the accumulators for the next step's micro-batches.
    self.accumulators.counts.Reset()
    self.accumulators.mean_ss.Reset()
    self.accumulators.variance_ss.Reset()
    return tf.group(mean_update, var_update)

  def _Moments(self, inputs, group_size):
    """Computes mean and variance over N,H,W dimensions in inputs."""
    counts, mean_ss, variance_ss, _, = tf.nn.sufficient_statistics(
        inputs, axes=[0, 1, 2], keepdims=False)
    self.accumulators.counts.Update(counts)
    self.accumulators.mean_ss.Update(mean_ss)
    self.accumulators.variance_ss.Update(variance_ss)
    # Distributed batch norm that computes sufficient statistics from group_size
    # replicas. This is useful when batch_size_per_replica is too small to
    # compute reliable sufficient statistics.
    if py_utils.use_tpu() and group_size > 1:
      group_assignment = None
      num_shards = tpu_function.get_tpu_context().number_of_shards
      if num_shards is not None:
        if num_shards < group_size:
          # NOTE(review): 'bn_gropu_size' typo is in the original runtime
          # message; left unchanged here.
          raise ValueError('TPU shards={} less than bn_gropu_size={}.'.format(
              num_shards, group_size))
        if num_shards % group_size:
          raise ValueError(
              'TPU shards={} not divisible by bn_group_size={}.'.format(
                  num_shards, group_size))
        num_groups = num_shards // group_size
        group_assignment = []
        for g in range(num_groups):
          replica_ids = [g * group_size + i for i in range(group_size)]
          group_assignment.append(replica_ids)
        counts *= group_size
      mean_ss = tf.tpu.cross_replica_sum(mean_ss, group_assignment)
      variance_ss = tf.tpu.cross_replica_sum(variance_ss, group_assignment)
    # At each micro-step, batch_mean and batch_variance are computed
    # to normalize inputs. But they are not used to update moving_mean and
    # moving_variance variables until the last micro batch.
    mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
    return mean, variance

  def FProp(self, theta, inputs):
    """Applies batch normalization.

    Using the implementation in github.com/
    tensorflow/tpu/blob/master/models/official/amoeba_net/network_utils.py#L550

    Args:
      theta: A nested map object containing weights' values of this layer and
        its children layers.
      inputs: The inputs tensor. Shaped [..., dim].

    Returns:
      Output after applying batch normalization, with the same shape as
      'inputs'.
    """
    p = self.params
    inputs_dtype = inputs.dtype
    inputs = tf.cast(inputs, p.dtype)
    inputs = py_utils.with_dependencies([
        py_utils.assert_shape_match([tf.shape(inputs)[-1]], tf.shape(
            theta.beta))
    ], inputs)
    with tf.name_scope(p.name) as scope:
      if self.do_eval:
        # Eval path: normalize with the tracked moving statistics.
        outputs = tf.nn.batch_normalization(inputs, theta.moving_mean,
                                            theta.moving_variance, theta.beta,
                                            theta.gamma, p.epsilon)
      else:
        # Training path: normalize with this step's (micro-)batch moments.
        mean, variance = self._Moments(inputs, p.bn_group_size)
        mean = py_utils.CheckNumerics(
            mean, 'mean of {} failed numeric check'.format(scope))
        variance = py_utils.CheckNumerics(
            variance, 'variance of {} failed numeric check'.format(scope))
        outputs = tf.nn.batch_normalization(inputs, mean, variance, theta.beta,
                                            theta.gamma, p.epsilon)
      outputs.set_shape(inputs.get_shape())
      return tf.cast(outputs, inputs_dtype)

  @classmethod
  def FPropMeta(cls, p, inputs):
    """Returns metadata about the `FProp` computation for this layer."""
    py_utils.CheckShapes((inputs,))
    return py_utils.NestedMap(
        flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
        out_shapes=(inputs,))
| {
"content_hash": "9758e131110c411cebc8ab6d527aef7f",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 100,
"avg_line_length": 40.88235294117647,
"alnum_prop": 0.6246043165467626,
"repo_name": "mlperf/training_results_v0.7",
"id": "22c76b5f3c0d8b4bd52886bafce67dd494baa2ce",
"size": "21566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-512/lingvo/core/bn_layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflowjs import converters
from tensorflowjs import quantization
from tensorflowjs import version
# Re-export the package version string at the top level.
__version__ = version.version
| {
"content_hash": "076fcf91447ceb7dd2e7d8b189706d7b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 28.1,
"alnum_prop": 0.800711743772242,
"repo_name": "tensorflow/tfjs",
"id": "946f94954b2c5d3c59dee104e63c5200822616df",
"size": "937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tfjs-converter/python/tensorflowjs/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2165"
},
{
"name": "C",
"bytes": "1149"
},
{
"name": "C++",
"bytes": "511030"
},
{
"name": "CSS",
"bytes": "27067"
},
{
"name": "Dockerfile",
"bytes": "1840"
},
{
"name": "HTML",
"bytes": "132169"
},
{
"name": "Java",
"bytes": "4081"
},
{
"name": "JavaScript",
"bytes": "1200362"
},
{
"name": "Objective-C",
"bytes": "5247"
},
{
"name": "Python",
"bytes": "518704"
},
{
"name": "Ruby",
"bytes": "1981"
},
{
"name": "Shell",
"bytes": "76252"
},
{
"name": "Starlark",
"bytes": "176198"
},
{
"name": "TypeScript",
"bytes": "10878537"
}
],
"symlink_target": ""
} |
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import cPickle as pickle
import functools
import os
import re
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
from nova.tests import matchers
from nova.tests.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Pull in the option definitions this test module reads (e.g.
# CONF.node_availability_zone, CONF.network_manager).
CONF.import_opt('compute_manager', 'nova.config')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.config')
CONF.import_opt('network_manager', 'nova.config')
CONF.import_opt('node_availability_zone', 'nova.config')

# Fake glance image ids used throughout these tests; each id keys an
# entry in IMAGE_FIXTURES.
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
# Image metadata registered with the fake image service, keyed by the fake
# image ids; each fixture covers one disk/container format combination.
IMAGE_FIXTURES = {
    IMAGE_MACHINE: {
        'image_meta': {'name': 'fakemachine', 'size': 0,
                       'disk_format': 'ami',
                       'container_format': 'ami'},
    },
    IMAGE_KERNEL: {
        'image_meta': {'name': 'fakekernel', 'size': 0,
                       'disk_format': 'aki',
                       'container_format': 'aki'},
    },
    IMAGE_RAMDISK: {
        'image_meta': {'name': 'fakeramdisk', 'size': 0,
                       'disk_format': 'ari',
                       'container_format': 'ari'},
    },
    IMAGE_RAW: {
        'image_meta': {'name': 'fakeraw', 'size': 0,
                       'disk_format': 'raw',
                       'container_format': 'bare'},
    },
    IMAGE_VHD: {
        'image_meta': {'name': 'fakevhd', 'size': 0,
                       'disk_format': 'vhd',
                       'container_format': 'ovf'},
    },
    IMAGE_ISO: {
        'image_meta': {'name': 'fakeiso', 'size': 0,
                       'disk_format': 'iso',
                       'container_format': 'bare'},
    },
}
def set_image_fixtures():
    """Reset the fake image service and register every IMAGE_FIXTURES entry."""
    image_service = fake_image.FakeImageService()
    image_service.images.clear()
    for image_id, fixture in IMAGE_FIXTURES.items():
        # Stamp the id onto the fixture's metadata dict (mutated in place,
        # exactly as before) and register it with the fake service.
        meta = fixture['image_meta']
        meta['id'] = image_id
        image_service.create(None, meta)
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
    """
    vm_utils.with_vdi_attached_here needs to be stubbed out because it
    calls down to the filesystem to attach a vdi. This provides a
    decorator to handle that.

    `should_return` becomes the stubbed result of vm_utils._is_vdi_pv.
    """
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):
        @contextlib.contextmanager
        def fake_vdi_attached_here(*args, **kwargs):
            # Pretend the VDI is attached at a fake device node.
            fake_dev = 'fakedev'
            yield fake_dev

        def fake_image_download(*args, **kwargs):
            pass

        def fake_is_vdi_pv(*args, **kwargs):
            return should_return

        # Save the real implementations so they can be restored even if the
        # wrapped test raises.
        orig_vdi_attached_here = vm_utils.vdi_attached_here
        orig_image_download = fake_image._FakeImageService.download
        orig_is_vdi_pv = vm_utils._is_vdi_pv
        try:
            vm_utils.vdi_attached_here = fake_vdi_attached_here
            fake_image._FakeImageService.download = fake_image_download
            vm_utils._is_vdi_pv = fake_is_vdi_pv
            return function(self, *args, **kwargs)
        finally:
            # Restore in reverse order of patching.
            vm_utils._is_vdi_pv = orig_is_vdi_pv
            fake_image._FakeImageService.download = orig_image_download
            vm_utils.vdi_attached_here = orig_vdi_attached_here

    return decorated_function
class XenAPIVolumeTestCase(stubs.XenAPITestBase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(disable_process_locking=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size=0):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = CONF.node_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_connection_data():
return {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password',
}
@classmethod
def _make_connection_info(cls):
return {
'driver_volume_type': 'iscsi',
'data': cls._make_connection_data()
}
def test_mountpoint_to_number(self):
cases = {
'sda': 0,
'sdp': 15,
'hda': 0,
'hdp': 15,
'vda': 0,
'xvda': 0,
'0': 0,
'10': 10,
'vdq': -1,
'sdq': -1,
'hdq': -1,
'xvdq': -1,
}
for (input, expected) in cases.iteritems():
actual = volume_utils.mountpoint_to_number(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
def test_parse_volume_info_parsing_auth_details(self):
result = volume_utils.parse_volume_info(
self._make_connection_data())
self.assertEquals('username', result['chapuser'])
self.assertEquals('password', result['chappassword'])
def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
self.assertRaises(
volume_utils.StorageError,
volume_utils.get_device_number,
'dev/sd')
    def test_attach_volume(self):
        """This shows how to test Ops classes' methods."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        instance = db.instance_create(self.context, self.instance_values)
        vm = xenapi_fake.create_vm(instance['name'], 'Running')
        result = conn.attach_volume(self._make_connection_info(),
                                    instance['name'], '/dev/sdc')
        # check that the VM has a VBD attached to it
        # Get XenAPI record for VBD
        vbds = xenapi_fake.get_all('VBD')
        vbd = xenapi_fake.get_record('VBD', vbds[0])
        vm_ref = vbd['VM']
        self.assertEqual(vm_ref, vm)
    def test_attach_volume_raise_exception(self):
        """This shows how to test when exceptions are raised."""
        # The fake session makes the SR-introduce step fail; an unknown
        # driver_volume_type must surface as VolumeDriverNotFound.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume,
                          {'driver_volume_type': 'nonexist'},
                          instance['name'],
                          '/dev/sdc')
class XenAPIVMTestCase(stubs.XenAPITestBase):
    """Unit tests for VM operations."""
    def setUp(self):
        # NOTE: the stubbing order below matters — the session must be
        # stubbed before the XenAPIDriver is constructed.
        super(XenAPIVMTestCase, self).setUp()
        self.network = importutils.import_object(CONF.network_manager)
        self.flags(disable_process_locking=True,
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',)
        xenapi_fake.create_local_srs()
        xenapi_fake.create_local_pifs()
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', CONF.flat_network_bridge)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        stubs.stubout_is_vdi_pv(self.stubs)
        stubs.stub_out_vm_methods(self.stubs)
        fake_utils.stub_out_utils_execute(self.stubs)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        fake_image.stub_out_image_service(self.stubs)
        set_image_fixtures()
        stubs.stubout_image_service_download(self.stubs)
        stubs.stubout_stream_disk(self.stubs)
        # Metadata injection needs a real agent; make it a no-op here.
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)
        # Replace the copy-on-write VDI copy with a plain create_vdi so
        # no real VHD operations are attempted against the fake host.
        def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
            name_label = "fakenamelabel"
            disk_type = "fakedisktype"
            virtual_size = 777
            return vm_utils.create_vdi(
                    session, sr_ref, instance, name_label, disk_type,
                    virtual_size)
        self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
    def tearDown(self):
        """Reset the fake image service so fixtures don't leak across tests."""
        super(XenAPIVMTestCase, self).tearDown()
        fake_image.FakeImageService_reset()
    def test_init_host(self):
        """init_host should detach only VBDs of VDIs tagged with an instance
        uuid; the local root disk and look-alike VDIs must be left alone.
        """
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        vm = vm_utils._get_this_vm_ref(session)
        # Local root disk
        vdi0 = xenapi_fake.create_vdi('compute', None)
        vbd0 = xenapi_fake.create_vbd(vm, vdi0)
        # Instance VDI
        vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
                other_config={'nova_instance_uuid': 'aaaa'})
        vbd1 = xenapi_fake.create_vbd(vm, vdi1)
        # Only looks like instance VDI
        vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
        vbd2 = xenapi_fake.create_vbd(vm, vdi2)
        self.conn.init_host(None)
        # vbd1 (the real instance VDI's VBD) is expected to be gone.
        self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
    def test_list_instances_0(self):
        """With no VMs spawned, list_instances returns an empty list."""
        instances = self.conn.list_instances()
        self.assertEquals(instances, [])
    def test_get_rrd_server(self):
        """The RRD server scheme/address come from the connection URL."""
        self.flags(xenapi_connection_url='myscheme://myaddress/')
        server_info = vm_utils._get_rrd_server()
        self.assertEqual(server_info[0], 'myscheme')
        self.assertEqual(server_info[1], 'myaddress')
    def test_get_diagnostics(self):
        """get_diagnostics parses the canned RRD XML into the expected
        per-resource averages.
        """
        def fake_get_rrd(host, vm_uuid):
            # Serve the fixture RRD file, stripped of whitespace, in place
            # of a real host query.
            path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(path, 'xenapi/vm_rrd.xml')) as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
        fake_diagnostics = {
            'vbd_xvdb_write': '0.0',
            'memory_target': '4294967296.0000',
            'memory_internal_free': '1415564.0000',
            'memory': '4294967296.0000',
            'vbd_xvda_write': '0.0',
            'cpu0': '0.0042',
            'vif_0_tx': '287.4134',
            'vbd_xvda_read': '0.0',
            'vif_0_rx': '1816.0144',
            'vif_2_rx': '0.0',
            'vif_2_tx': '0.0',
            'vbd_xvdb_read': '0.0',
            'last_update': '1328795567',
        }
        instance = self._create_instance()
        expected = self.conn.get_diagnostics(instance)
        self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
    def test_instance_snapshot_fails_with_no_primary_vdi(self):
        """Snapshot must fail when no VBD is on userdevice 0 (no root disk)."""
        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
                           vbd_type='disk', read_only=False, bootable=False,
                           osvol=False):
            # Force a bogus userdevice so the root VDI can't be located.
            vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': 'fake',
               'currently_attached': False}
            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
            return vbd_ref
        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
        stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()
        image_id = "my_snapshot_id"
        self.assertRaises(exception.NovaException, self.conn.snapshot,
                          self.context, instance, image_id)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.conn.snapshot(self.context, instance, image_id)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [instance['name']])
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
    def create_vm_record(self, conn, os_type, name):
        """Capture Nova's and XenAPI's view of the named VM on self.vm_info /
        self.vm for later assertions.

        NOTE: os_type is currently unused here; kept for caller symmetry.
        """
        instances = conn.list_instances()
        self.assertEquals(instances, [name])
        # Get Nova record for VM
        vm_info = conn.get_info({'name': name})
        # Get XenAPI record for VM
        vms = [rec for ref, rec
               in xenapi_fake.get_all_records('VM').iteritems()
               if not rec['is_control_domain']]
        vm = vms[0]
        self.vm_info = vm_info
        self.vm = vm
    def check_vm_record(self, conn, check_injection=False):
        """Assert the spawned VM matches the m1.large flavor, is running, and
        (optionally) has the expected networking data in xenstore.
        """
        # Check that m1.large above turned into the right thing.
        instance_type = db.instance_type_get_by_name(conn, 'm1.large')
        mem_kib = long(instance_type['memory_mb']) << 10
        mem_bytes = str(mem_kib << 10)
        vcpus = instance_type['vcpus']
        self.assertEquals(self.vm_info['max_mem'], mem_kib)
        self.assertEquals(self.vm_info['mem'], mem_kib)
        self.assertEquals(self.vm['memory_static_max'], mem_bytes)
        self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
        self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
        self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
        self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
        # Check that the VM is running according to Nova
        self.assertEquals(self.vm_info['state'], power_state.RUNNING)
        # Check that the VM is running according to XenAPI.
        self.assertEquals(self.vm['power_state'], 'Running')
        if check_injection:
            # Networking info injected into xenstore is a repr'd dict;
            # literal_eval turns it back into Python data for comparison.
            xenstore_data = self.vm['xenstore_data']
            self.assertEquals(xenstore_data['vm-data/hostname'], 'test')
            key = 'vm-data/networking/DEADBEEF0001'
            xenstore_value = xenstore_data[key]
            tcpip_data = ast.literal_eval(xenstore_value)
            self.assertEquals(tcpip_data,
                              {'broadcast': '192.168.1.255',
                               'dns': ['192.168.1.4', '192.168.1.3'],
                               'gateway': '192.168.1.1',
                               'gateway_v6': 'fe80::def',
                               'ip6s': [{'enabled': '1',
                                         'ip': '2001:db8:0:1::1',
                                         'netmask': 64,
                                         'gateway': 'fe80::def'}],
                               'ips': [{'enabled': '1',
                                        'ip': '192.168.1.100',
                                        'netmask': '255.255.255.0',
                                        'gateway': '192.168.1.1'},
                                       {'enabled': '1',
                                        'ip': '192.168.1.101',
                                        'netmask': '255.255.255.0',
                                        'gateway': '192.168.1.1'}],
                               'label': 'test1',
                               'mac': 'DE:AD:BE:EF:00:01'})
    def check_vm_params_for_windows(self):
        """Windows guests boot HVM (disk then cdrom); PV fields stay empty."""
        self.assertEquals(self.vm['platform']['nx'], 'true')
        self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
        self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
        # check that these are not set
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], '')
        self.assertEquals(self.vm['PV_kernel'], '')
        self.assertEquals(self.vm['PV_ramdisk'], '')
    def check_vm_params_for_linux(self):
        """Linux guests without external kernel boot PV via pygrub."""
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], '')
        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
        # check that these are not set
        self.assertEquals(self.vm['PV_kernel'], '')
        self.assertEquals(self.vm['PV_ramdisk'], '')
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')
    def check_vm_params_for_linux_with_external_kernel(self):
        """Linux guests with an external kernel/ramdisk boot PV directly."""
        self.assertEquals(self.vm['platform']['nx'], 'false')
        self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
        self.assertNotEquals(self.vm['PV_kernel'], '')
        self.assertNotEquals(self.vm['PV_ramdisk'], '')
        # check that these are not set
        self.assertEquals(self.vm['HVM_boot_params'], {})
        self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password,
fake.FakeVirtAPI())
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password,
fake.FakeVirtAPI())
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if not vdi_ref in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    hostname="test", architecture="x86-64", instance_id=1,
                    injected_files=None, check_injection=False,
                    create_record=True, empty_dns=False):
        """Spawn an instance with the given image/kernel/ramdisk and verify
        the resulting VM record via create_vm_record/check_vm_record.

        :param create_record: when False, the instance row is expected to
            already exist (e.g. pre-created by a vlan-manager test).
        :param empty_dns: strip the dns entries from the first subnet.
        """
        if injected_files is None:
            injected_files = []
        # Fake out inject_instance_metadata
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)
        if create_record:
            instance_values = {'id': instance_id,
                               'project_id': self.project_id,
                               'user_id': self.user_id,
                               'image_ref': image_ref,
                               'kernel_id': kernel_id,
                               'ramdisk_id': ramdisk_id,
                               'root_gb': 20,
                               'instance_type_id': instance_type_id,
                               'os_type': os_type,
                               'hostname': hostname,
                               'architecture': architecture}
            instance = db.instance_create(self.context, instance_values)
        else:
            instance = db.instance_get(self.context, instance_id)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        if empty_dns:
            # NOTE(tr3buchet): this is a terrible way to do this...
            network_info[0]['network']['subnets'][0]['dns'] = []
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        self.conn.spawn(self.context, instance, image_meta, injected_files,
                        'herp', network_info)
        self.create_vm_record(self.conn, os_type, instance['name'])
        self.check_vm_record(self.conn, check_injection)
        self.assertTrue(instance['os_type'])
        self.assertTrue(instance['architecture'])
    def test_spawn_empty_dns(self):
        """Test spawning with an empty dns list"""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         empty_dns=True)
        self.check_vm_params_for_linux()
    def test_spawn_not_enough_memory(self):
        """m1.xlarge exceeds the fake host's free memory and must fail."""
        self.assertRaises(exception.InsufficientFreeMemory,
                          self._test_spawn,
                          1, 2, 3, "4")  # m1.xlarge
    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, 1, 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, 1, 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_3(self):
        """Simulates an error while attaching disks.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        # NOTE: unlike the two tests above, the stub is installed before
        # the VDI/VM baselines are captured here.
        stubs.stubout_attach_disks(self.stubs)
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, 1, 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    @stub_vm_utils_with_vdi_attached_here
    def test_spawn_raw_glance(self):
        """Raw glance images are attached as plain disks; PV linux boot."""
        self._test_spawn(IMAGE_RAW, None, None)
        self.check_vm_params_for_linux()
    def test_spawn_vhd_glance_linux(self):
        """VHD glance image with linux os_type yields a PV guest."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.check_vm_params_for_linux()
    def test_spawn_vhd_glance_windows(self):
        """VHD glance image with windows os_type yields an HVM guest."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()
    def test_spawn_iso_glance(self):
        """ISO glance images also boot HVM (windows-style params)."""
        self._test_spawn(IMAGE_ISO, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()
    def test_spawn_glance(self):
        """Machine image with external kernel/ramdisk boots PV directly."""
        stubs.stubout_fetch_disk_image(self.stubs)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()
    def test_spawn_netinject_file(self):
        """With flat_injected, spawning writes /etc/network/interfaces into
        the guest disk via a tee command; verify the generated config.
        """
        self.flags(flat_injected=True)
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
        self._tee_executed = False
        def _tee_handler(cmd, **kwargs):
            # Validate the interfaces file content piped to tee.
            input = kwargs.get('process_input', None)
            self.assertNotEqual(input, None)
            config = [line.strip() for line in input.split("\n")]
            # Find the start of eth0 configuration and check it
            index = config.index('auto eth0')
            self.assertEquals(config[index + 1:index + 8], [
                'iface eth0 inet static',
                'address 192.168.1.100',
                'netmask 255.255.255.0',
                'broadcast 192.168.1.255',
                'gateway 192.168.1.1',
                'dns-nameservers 192.168.1.3 192.168.1.4',
                ''])
            self._tee_executed = True
            return '', ''
        def _readlink_handler(cmd_parts, **kwargs):
            return os.path.realpath(cmd_parts[2]), ''
        fake_utils.fake_execute_set_repliers([
            # Capture the tee .../etc/network/interfaces command
            (r'tee.*interfaces', _tee_handler),
            (r'readlink -nm.*', _readlink_handler),
        ])
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         check_injection=True)
        self.assertTrue(self._tee_executed)
    def test_spawn_netinject_xenstore(self):
        """When a guest agent is present in the image, file injection via
        tee must be skipped (networking goes through xenstore instead).
        """
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
        self._tee_executed = False
        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem
            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug(_('Creating files in %s to simulate guest agent'),
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                              'xe-update-networking'), 'w').close()
            return '', ''
        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug(_('Removing simulated guest agent files in %s'),
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                                   'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''
        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''
        fake_utils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn(1, 2, 3, check_injection=True)
        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
    def test_spawn_vlanmanager(self):
        """Spawn an instance whose network is allocated by VlanManager."""
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')
        def dummy(*args, **kwargs):
            pass
        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance id = 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        instance = self._create_instance(2, False)
        networks = self.network.db.network_get_all(ctxt)
        for network in networks:
            self.network.set_network_host(ctxt, network)
        self.network.allocate_for_instance(ctxt,
                          instance_id=2,
                          instance_uuid='00000000-0000-0000-0000-000000000002',
                          host=CONF.host,
                          vpn=None,
                          rxtx_factor=3,
                          project_id=self.project_id)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=2,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
    def test_spawn_with_network_qos(self):
        """Every VIF of a spawned instance carries the ratelimit QoS params
        derived from rxtx factor (3) * base rate (10 * 1024 kbps).
        """
        self._create_instance()
        for vif_ref in xenapi_fake.get_all('VIF'):
            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
            self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
            self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
                              str(3 * 10 * 1024))
    def test_spawn_injected_files(self):
        """Test spawning with injected_files"""
        actual_injected_files = []
        def fake_inject_file(self, method, args):
            # Agent plugin receives base64-encoded path/contents; record
            # the decoded pair for the final comparison.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        injected_files = [('/tmp/foo', 'foobar')]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         injected_files=injected_files)
        self.check_vm_params_for_linux()
        self.assertEquals(actual_injected_files, injected_files)
def test_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_uuids = []
for vbd_uuid in rescue_vm["VBDs"]:
vdi_uuids.append(xenapi_fake.get_record('VBD', vbd_uuid)["VDI"])
self.assertTrue("swap" not in vdi_uuids)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
    def test_finish_revert_migration(self):
        """Driver's finish_revert_migration must delegate to the vmops."""
        instance = self._create_instance()
        class VMOpsMock():
            def __init__(self):
                self.finish_revert_migration_called = False
            def finish_revert_migration(self, instance):
                self.finish_revert_migration_called = True
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn._vmops = VMOpsMock()
        conn.finish_revert_migration(instance, None)
        self.assertTrue(conn._vmops.finish_revert_migration_called)
    def test_reboot_hard(self):
        """A hard reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(instance, None, "HARD")
    def test_reboot_soft(self):
        """A soft reboot of a running instance completes without error."""
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(instance, None, "SOFT")
    def test_reboot_halted(self):
        """Soft-rebooting a halted VM starts it (power_state -> Running)."""
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Halted')
        conn.reboot(instance, None, "SOFT")
        vm_ref = vm_utils.lookup(session, instance['name'])
        vm = xenapi_fake.get_record('VM', vm_ref)
        self.assertEquals(vm['power_state'], 'Running')
    def test_reboot_unknown_state(self):
        """Rebooting a VM in an unknown power state raises a Failure."""
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Unknown')
        self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
                          None, "SOFT")
def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [{'fake': 'aggregate'}]
else:
return []
self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
# We expect the VM.pool_migrate call to have been called to
# migrate our instance to the 'bar' host
expected = (instance['uuid'], 'bar', {})
self.assertTrue(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
self.assertTrue(instance['task_state'], task_states.MIGRATING)
    def test_maintenance_mode(self):
        """Happy path: host found and aggregate present."""
        self._test_maintenance_mode(True, True)
    def test_maintenance_mode_no_host(self):
        """No destination host available -> NoValidHost."""
        self.assertRaises(exception.NoValidHost,
                          self._test_maintenance_mode, False, True)
    def test_maintenance_mode_no_aggregate(self):
        """Host not in any aggregate -> NotFound."""
        self.assertRaises(exception.NotFound,
                          self._test_maintenance_mode, True, False)
    def test_session_virtapi(self):
        """_get_host_uuid on a slave session must consult the virtapi's
        aggregate_get_by_host; the raised exception is deliberately
        swallowed -- only the call itself is asserted.
        """
        was = {'called': False}
        def fake_aggregate_get_by_host(self, *args, **kwargs):
            was['called'] = True
            raise Exception()
        self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.stubs.Set(self.conn._session, "is_slave", True)
        try:
            self.conn._session._get_host_uuid()
        except Exception:
            pass
        self.assertTrue(was['called'])
    def _create_instance(self, instance_id=1, spawn=True):
        """Creates and spawns a test instance.

        :param instance_id: numeric id, also embedded in the uuid/name.
        :param spawn: when False only the db record is created.
        :returns: the created instance record.
        """
        instance_values = {
            'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
            'display_name': 'host-%d' % instance_id,
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 20,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance = db.instance_create(self.context, instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
    """Unit tests for Diffie-Hellman code."""
    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        # Two independent parties for the key exchange.
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()
    def test_shared(self):
        """Both parties derive the same shared secret from exchanged keys."""
        alice_pub = self.alice.get_public()
        bob_pub = self.bob.get_public()
        alice_shared = self.alice.compute_shared(bob_pub)
        bob_shared = self.bob.compute_shared(alice_pub)
        self.assertEquals(alice_shared, bob_shared)
    def _test_encryption(self, message):
        """Round-trip a message alice->bob and check exact recovery."""
        enc = self.alice.encrypt(message)
        # Ciphertext must not grow a trailing newline in transit.
        self.assertFalse(enc.endswith('\n'))
        dec = self.bob.decrypt(enc)
        self.assertEquals(dec, message)
    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')
    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')
    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')
    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')
    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')
    def test_encrypt_really_long_message(self):
        # 4 KiB payload exercises any block/chunk handling in the cipher.
        self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
class XenAPIMigrateInstance(stubs.XenAPITestBase):
    """Unit test for verifying migration-related actions."""
    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', CONF.flat_network_bridge)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        # Template instance values; individual tests tweak before creating.
        self.instance_values = {'id': 1,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'image_ref': 1,
                  'kernel_id': None,
                  'ramdisk_id': None,
                  'root_gb': 5,
                  'instance_type_id': '3',  # m1.large
                  'os_type': 'linux',
                  'architecture': 'x86-64'}
        # A post-migrating migration record shared by the finish_* tests.
        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)
        fake_utils.stub_out_utils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        # Metadata injection needs a real agent; make it a no-op here.
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
                       fake_inject_instance_metadata)
    def test_resize_xenserver_6(self):
        """On XenServer 6.0, _resize_instance uses offline VDI_resize."""
        instance = db.instance_create(self.context, self.instance_values)
        called = {'resize': False}
        def fake_vdi_resize(*args, **kwargs):
            called['resize'] = True
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(6, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
        vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
        conn._vmops._resize_instance(instance,
                                     {'uuid': vdi_uuid, 'ref': vdi_ref})
        self.assertEqual(called['resize'], True)
    def test_resize_xcp(self):
        """On XCP, _resize_instance likewise uses offline VDI_resize."""
        instance = db.instance_create(self.context, self.instance_values)
        called = {'resize': False}
        def fake_vdi_resize(*args, **kwargs):
            called['resize'] = True
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(1, 4, 99),
                              product_brand='XCP')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
        vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
        conn._vmops._resize_instance(instance,
                                     {'uuid': vdi_uuid, 'ref': vdi_ref})
        self.assertEqual(called['resize'], True)
    def test_migrate_disk_and_power_off(self):
        """migrate_disk_and_power_off completes against the fake host."""
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', instance_type, None)
    def test_migrate_disk_and_power_off_passes_exceptions(self):
        """Errors from the VHD transfer must propagate as MigrationError."""
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
        def fake_raise(*args, **kwargs):
            raise exception.MigrationError(reason='test failure')
        self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', instance_type, None)
    def test_revert_migrate(self):
        """finish_migration resizes + starts the VM; finish_revert_migration
        is then delegated to the vmops implementation.
        """
        instance = db.instance_create(self.context, self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False
        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True
        def fake_vdi_resize(*args, **kwargs):
            self.called = True
        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        # base/cow VDI pair simulating the migrated disk chain.
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, True)
        conn.finish_revert_migration(instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)
    def test_finish_migrate(self):
        """finish_migration with resize_instance resizes the VDI online and
        starts the VM.
        """
        instance = db.instance_create(self.context, self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True
        def fake_vdi_resize(*args, **kwargs):
            self.called = True
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs,
                                                              spectacular=True)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type = instance_types.get_instance_type_by_name('m1.tiny')
tiny_type_id = tiny_type['id']
self.instance_values.update({'instance_type_id': tiny_type_id,
'root_gb': 0})
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
def test_migrate_no_auto_disk_config_no_resize_down(self):
"""Resize down should fail when auto_disk_config not set"""
instance_values = self.instance_values
instance_values['root_gb'] = 40
instance_values['auto_disk_config'] = False
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', instance_type, None)
class XenAPIImageTypeTestCase(test.TestCase):
    """Tests for the vm_utils.ImageType enumeration helper."""

    def test_to_string(self):
        """A type id converts to its corresponding string name."""
        converted = vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL)
        self.assertEquals(vm_utils.ImageType.KERNEL_STR, converted)

    def _assert_role(self, expected_role, image_type_id):
        # Helper: check the role string derived from a type id.
        actual = vm_utils.ImageType.get_role(image_type_id)
        self.assertEquals(expected_role, actual)

    def test_get_image_role_kernel(self):
        self._assert_role('kernel', vm_utils.ImageType.KERNEL)

    def test_get_image_role_ramdisk(self):
        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)

    def test_get_image_role_disk(self):
        self._assert_role('root', vm_utils.ImageType.DISK)

    def test_get_image_role_disk_raw(self):
        self._assert_role('root', vm_utils.ImageType.DISK_RAW)

    def test_get_image_role_disk_vhd(self):
        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
    """Unit tests for vm_utils.determine_disk_image_type()."""

    def assert_disk_type(self, image_meta, expected_disk_type):
        # Helper: the image metadata must map to the expected ImageType.
        self.assertEqual(expected_disk_type,
                         vm_utils.determine_disk_image_type(image_meta))

    def test_machine(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'ami'},
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'raw'},
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'vhd'},
                              vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
    """Tests for the vmops.cmp_version() dotted-version comparator."""

    def test_less_than(self):
        """A smaller version compares as negative."""
        result = vmops.cmp_version('1.2.3.4', '1.2.3.5')
        self.assertTrue(result < 0)

    def test_greater_than(self):
        """A larger version compares as positive."""
        result = vmops.cmp_version('1.2.3.5', '1.2.3.4')
        self.assertTrue(result > 0)

    def test_equal(self):
        """Identical versions compare as zero."""
        result = vmops.cmp_version('1.2.3.4', '1.2.3.4')
        self.assertTrue(result == 0)

    def test_non_lexical(self):
        """Components compare numerically, so 10 > 4."""
        result = vmops.cmp_version('1.2.3.10', '1.2.3.4')
        self.assertTrue(result > 0)

    def test_length(self):
        """With equal prefixes, the shorter version is smaller."""
        result = vmops.cmp_version('1.2.3', '1.2.3.4')
        self.assertTrue(result < 0)
class XenAPIHostTestCase(stubs.XenAPITestBase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers."""

    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        xenapi_fake.create_local_srs()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

    def test_host_state(self):
        # NOTE(review): the expected numbers are presumably the canned
        # metrics served by the fake session/SRs above -- confirm in
        # xenapi_fake if they ever drift.
        stats = self.conn.get_host_stats()
        self.assertEquals(stats['disk_total'], 40000)
        self.assertEquals(stats['disk_used'], 20000)
        self.assertEquals(stats['host_memory_total'], 10)
        self.assertEquals(stats['host_memory_overhead'], 20)
        self.assertEquals(stats['host_memory_free'], 30)
        self.assertEquals(stats['host_memory_free_computed'], 40)

    def _test_host_action(self, method, action, expected=None):
        # Helper: invoke a host-level driver call and check its result.
        # When no explicit expectation is given, the call is expected to
        # echo the action name back.
        result = method('host', action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)

    def test_host_reboot(self):
        self._test_host_action(self.conn.host_power_action, 'reboot')

    def test_host_shutdown(self):
        self._test_host_action(self.conn.host_power_action, 'shutdown')

    def test_host_startup(self):
        # Powering a host back on is not supported by the driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'host', 'startup')

    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')

    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')

    def test_set_enable_host_enable(self):
        self._test_host_action(self.conn.set_host_enabled, True, 'enabled')

    def test_set_enable_host_disable(self):
        self._test_host_action(self.conn.set_host_enabled, False, 'disabled')

    def test_get_host_uptime(self):
        result = self.conn.get_host_uptime('host')
        self.assertEqual(result, 'fake uptime')

    def test_supported_instances_is_included_in_host_state(self):
        stats = self.conn.get_host_stats()
        self.assertTrue('supported_instances' in stats)

    def test_supported_instances_is_calculated_by_to_supported_instances(self):

        # get_host_stats must delegate the computation to
        # host.to_supported_instances and surface its return value.
        def to_supported_instances(somedata):
            self.assertEquals(None, somedata)
            return "SOMERETURNVALUE"

        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)

        stats = self.conn.get_host_stats()
        self.assertEquals("SOMERETURNVALUE", stats['supported_instances'])
class ToSupportedInstancesTestCase(test.TestCase):
    """Tests for host.to_supported_instances()."""

    def test_default_return_value(self):
        """None input yields an empty capability list."""
        self.assertEquals([], host.to_supported_instances(None))

    def test_return_value(self):
        """A XenServer capability string maps to an (arch, api, mode) tuple."""
        supported = host.to_supported_instances([u'xen-3.0-x86_64'])
        self.assertEquals([('x86_64', 'xapi', 'xen')], supported)

    def test_invalid_values_do_not_break(self):
        """Unparseable entries are skipped rather than raising."""
        supported = host.to_supported_instances([u'xen-3.0-x86_64', 'spam'])
        self.assertEquals([('x86_64', 'xapi', 'xen')], supported)

    def test_multiple_values(self):
        """Every valid capability produces one tuple, in order."""
        supported = host.to_supported_instances([u'xen-3.0-x86_64',
                                                 'hvm-3.0-x86_32'])
        expected = [('x86_64', 'xapi', 'xen'),
                    ('x86_32', 'xapi', 'hvm')]
        self.assertEquals(expected, supported)
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    """Tests the auto_disk_config path of _attach_disks: the root
    partition/filesystem should only be resized when the instance opts in
    and the virt-layer fail-safe checks pass."""

    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 20,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to these tests; make it a no-op.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        """Drive _attach_disks and assert whether the partition/fs resize
        helper was (or was not) invoked."""
        marker = {"partition_called": False}

        def fake_resize_part_and_fs(dev, start, old, new):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)

        # NOTE(review): ctx appears unused; self.context is what is passed
        # to instance_create below.
        ctx = context.RequestContext(self.user_id, self.project_id)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())

        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = db.instance_create(self.context, self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type)

        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
        """Should not partition unless fail safes pass"""
        self.instance_values['auto_disk_config'] = True

        # Two partitions on the disk -> fail-safe rejects the resize.
        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        # A single ext4 partition -> fail-safe accepts the resize.
        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4')]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(stubs.XenAPITestBase):
    """Test generating of local disks, like swap and ephemeral"""

    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 20,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to these tests; make it a no-op.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertCalled(self, instance):
        """Drive _attach_disks and assert that the stubbed generator
        (swap or ephemeral, set by the individual test) ran."""
        # NOTE(review): ctx appears unused; self.context is used elsewhere.
        ctx = context.RequestContext(self.user_id, self.project_id)
        session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                            fake.FakeVirtAPI())

        disk_image_type = vm_utils.ImageType.DISK_VHD
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.called = False
        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type)
        self.assertTrue(self.called)

    def test_generate_swap(self):
        """Test swap disk generation."""
        instance = db.instance_create(self.context, self.instance_values)
        instance = db.instance_update(self.context, instance['uuid'],
                                      {'instance_type_id': 5})
        # NOTE(danms): because we're stubbing out the instance_types from
        # the database, our instance['instance_type'] doesn't get properly
        # filled out here, so put what we need into it
        instance['instance_type']['swap'] = 1024

        def fake_generate_swap(*args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)

        self.assertCalled(instance)

    def test_generate_ephemeral(self):
        """Test ephemeral disk generation."""
        instance = db.instance_create(self.context, self.instance_values)
        instance = db.instance_update(self.context, instance['uuid'],
                                      {'instance_type_id': 4})
        # NOTE(danms): because we're stubbing out the instance_types from
        # the database, our instance['instance_type'] doesn't get properly
        # filled out here, so put what we need into it
        instance['instance_type']['ephemeral_gb'] = 160

        def fake_generate_ephemeral(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)

        self.assertCalled(instance)
class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
    """Tests for get_all_bw_counters(): merging per-domid bandwidth data
    from XenServer with the driver's VIF device -> MAC mapping."""

    # Two fake VMs keyed by opaque ref; _vifmap maps VIF device index to
    # the MAC address that should show up in the counters.
    FAKE_VMS = {'test1:ref': dict(name_label='test1',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='12',
                                  _vifmap={'0': "a:b:c:d...",
                                           '1': "e:f:12:q..."}),
                'test2:ref': dict(name_label='test2',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='42',
                                  _vifmap={'0': "a:3:c:d...",
                                           '1': "e:f:42:q..."}),
                }

    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']

        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)

    @classmethod
    def _fake_list_vms(cls, session):
        return cls.FAKE_VMS.iteritems()

    @classmethod
    def _fake_fetch_bandwidth_mt(cls, session):
        # Simulates XenServer returning no bandwidth data at all.
        return {}

    @classmethod
    def _fake_fetch_bandwidth(cls, session):
        # Bandwidth keyed by domid, then by VIF device index.
        return {'42':
                    {'0': {'bw_in': 21024, 'bw_out': 22048},
                     '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12':
                    {'0': {'bw_in': 1024, 'bw_out': 2048},
                     '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }

    def test_get_all_bw_counters(self):
        class testinstance(object):
            def __init__(self, name, uuid):
                self.name = name
                self.uuid = uuid

        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       XenAPIBWCountersTestCase._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters([testinstance(
                                                    name='test1',
                                                    uuid='1-2-3'),
                                                testinstance(
                                                    name='test2',
                                                    uuid='4-5-6')])
        # One counter dict per (instance, VIF) pair: 2 VMs x 2 VIFs.
        self.assertEqual(len(result), 4)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="a:b:c:d...",
                           bw_in=1024,
                           bw_out=2048), result)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="e:f:12:q...",
                           bw_in=31337,
                           bw_out=21212121), result)

        self.assertIn(dict(uuid='4-5-6',
                           mac_address="a:3:c:d...",
                           bw_in=21024,
                           bw_out=22048), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="e:f:42:q...",
                           bw_in=231337,
                           bw_out=221212121), result)

    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_counters returns an empty list when
        no data is returned from XenServer.  c.f. bug #910045.
        """
        class testinstance(object):
            def __init__(self):
                self.name = "instance-0001"
                self.uuid = "1-2-3-4-5"

        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
        result = self.conn.get_all_bw_counters([testinstance()])
        self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
    """Tests the Dom0IptablesFirewallDriver against canned iptables-save
    output.

    NOTE(review): self._out_rules, read throughout, is presumably populated
    by FakeSessionForFirewallTests (which is handed this test case via
    ``test_case=self`` in setUp) -- confirm in the stubs module.
    """

    # Canned iptables-save/ip6tables-save output fed to the driver as the
    # pre-existing state of the tables.
    _in_nat_rules = [
        '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
        '*nat',
        ':PREROUTING ACCEPT [1170:189210]',
        ':INPUT ACCEPT [844:71028]',
        ':OUTPUT ACCEPT [5149:405186]',
        ':POSTROUTING ACCEPT [5063:386098]',
    ]

    _in_filter_rules = [
        '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
        '*filter',
        ':INPUT ACCEPT [969615:281627771]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [915599:63811649]',
        ':nova-block-ipv4 - [0:0]',
        '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
        '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
        ',ESTABLISHED -j ACCEPT ',
        '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -o virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        '[0:0] -A FORWARD -i virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        'COMMIT',
        '# Completed on Mon Dec 6 11:54:13 2010',
    ]

    _in6_filter_rules = [
        '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
        '*filter',
        ':INPUT ACCEPT [349155:75810423]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [349256:75777230]',
        'COMMIT',
        '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def setUp(self):
        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        xenapi_fake.create_local_srs()
        xenapi_fake.create_local_pifs()
        self.user_id = 'mappin'
        self.project_id = 'fake'
        # The fake session is handed this test case so it can exchange the
        # canned rules above with the driver.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(CONF.network_manager)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.fw = self.conn._vmops.firewall_driver

    def _create_instance_ref(self):
        # Minimal instance row for firewall tests.
        return db.instance_create(self.context,
                                  {'user_id': self.user_id,
                                   'project_id': self.project_id,
                                   'instance_type_id': 1})

    def _create_test_security_group(self):
        """Create 'testgroup' with ICMP (any), ICMP echo-request and
        TCP 80-81 ingress rules."""
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                            {'user_id': self.user_id,
                                             'project_id': self.project_id,
                                             'name': 'testgroup',
                                             'description': 'test group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})
        return secgroup

    def _validate_security_group(self):
        """Check that the driver's emitted rules (self._out_rules) preserve
        the non-nova input rules and contain the instance chain, its
        security-group chain, and the three 'testgroup' accept rules."""
        in_rules = filter(lambda l: not l.startswith('#'),
                          self._in_filter_rules)
        for rule in in_rules:
            if not 'nova' in rule:
                self.assertTrue(rule in self._out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
                           ' -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
                           ' --icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
                           ' -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = self._create_test_security_group()

        # A second group that grants TCP 80-81 from members of itself,
        # exercising group-sourced (rather than CIDR-sourced) rules.
        src_secgroup = db.security_group_create(admin_ctxt,
                                                {'user_id': self.user_id,
                                                 'project_id': self.project_id,
                                                 'name': 'testsourcegroup',
                                                 'description': 'src group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        network_model = fake_network.fake_get_instance_nw_info(self.stubs,
                                                               1, spectacular=True)

        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
                                                          lambda *a, **kw: network_model)

        network_info = network_model.legacy()
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)

        self._validate_security_group()
        # Extra test for TCP acceptance rules
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
                               ' --dport 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")

        db.instance_destroy(admin_ctxt, instance_ref['uuid'])

    def test_filters_for_instance_with_ip_v6(self):
        self.flags(use_ipv6=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEquals(len(rulesv4), 2)
        self.assertEquals(len(rulesv6), 1)

    def test_filters_for_instance_without_ip_v6(self):
        self.flags(use_ipv6=False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEquals(len(rulesv4), 2)
        self.assertEquals(len(rulesv6), 0)

    def test_multinic_iptables(self):
        # Expected per-address rule counts for 5 networks with 2 IPv4 and
        # 1 IPv6 address each.
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
        network_info = _get_instance_nw_info(self.stubs,
                                             networks_count,
                                             ipv4_addr_per_network)
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        # Network rules = everything added beyond the pre-existing rules
        # and the per-instance rules.
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        self.assertEquals(ipv4_network_rules,
                          ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
        self.assertEquals(ipv6_network_rules,
                          ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)

    def test_do_refresh_security_group_rules(self):
        admin_ctxt = context.get_admin_context()
        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        secgroup = self._create_test_security_group()
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.instances[instance_ref['id']] = instance_ref
        self._validate_security_group()
        # add a rule to the security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'udp',
                                       'from_port': 200,
                                       'to_port': 299,
                                       'cidr': '192.168.99.0/24'})
        #validate the extra rule
        self.fw.refresh_security_group_rules(secgroup)
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
                           ' -s 192.168.99.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "Rules were not updated properly."
                        "The rule for UDP acceptance is missing")

    def test_provider_firewall_rules(self):
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: as in libvirt tests
        # peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'tcp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                           if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
    """Unit tests for testing we find the right SR."""

    def _stubbed_session(self):
        # Every test drives vm_utils.safe_find_sr through the same fake
        # XenAPI session; build it after the test has set its flags.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        return xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
                                         fake.FakeVirtAPI())

    def test_safe_find_sr_raise_exception(self):
        """A filter matching nothing raises StorageRepositoryNotFound."""
        self.flags(sr_matching_filter='yadayadayada')
        session = self._stubbed_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        """The default local-storage SR is found via its i18n-key."""
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
        session = self._stubbed_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(
            name_label='Fake Storage',
            type='lvm',
            other_config={'i18n-original-value-name_label': 'Local storage',
                          'i18n-key': 'local-storage'},
            host_ref=host_ref)
        self.assertEqual(local_sr, vm_utils.safe_find_sr(session))

    def test_safe_find_sr_by_other_criteria(self):
        """An arbitrary other-config key can select the SR."""
        self.flags(sr_matching_filter='other-config:my_fake_sr=true')
        session = self._stubbed_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        self.assertEqual(local_sr, vm_utils.safe_find_sr(session))

    def test_safe_find_sr_default(self):
        """default-sr:true picks the pool's default SR."""
        self.flags(sr_matching_filter='default-sr:true')
        session = self._stubbed_session()
        pool_ref = xenapi_fake.create_pool('')
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         vm_utils.safe_find_sr(session))
def _create_service_entries(context, values=None):
    """Create nova-compute service rows grouped by availability zone.

    :param context: request context used for the db calls.
    :param values: optional mapping of availability-zone name to the list
        of compute host names to register in that zone.  Defaults to two
        hosts in ``avail_zone1`` and one host in ``avail_zone2``.
    :returns: the mapping that was used, so callers can iterate the
        zones/hosts that now exist.
    """
    # NOTE: the default used to be a mutable default argument; such a dict
    # is shared across calls (and is returned to callers, who may mutate
    # it), so build a fresh one per call instead.
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    for avail_zone, hosts in values.iteritems():
        for host in hosts:
            db.service_create(context,
                              {'host': host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0,
                               'availability_zone': avail_zone})
    return values
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
    def setUp(self):
        """Build a XenAPI driver, a compute manager and an aggregate
        flagged as a XenServer pool, plus the metadata an active pool
        master would carry (self.fake_metadata)."""
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(xenapi_connection_url='http://test_url',
                   xenapi_connection_username='test_user',
                   xenapi_connection_password='test_pass',
                   instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   node_availability_zone='avail_zone1')
        self.flags(use_local=True, group='conductor')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.compute = importutils.import_object(CONF.compute_manager)
        self.api = compute_api.AggregateAPI()
        # POOL_FLAG marks the aggregate as backed by a XenServer pool.
        values = {'name': 'test_aggr',
                  'availability_zone': 'test_zone',
                  'metadata': {pool_states.POOL_FLAG: 'XenAPI'}}
        self.aggr = db.aggregate_create(self.context, values)
        self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
                              'master_compute': 'host',
                              pool_states.KEY: pool_states.ACTIVE,
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEquals("CONTEXT", context)
self.assertEquals("AGGREGATE", aggregate)
self.assertEquals("HOST", host)
self.assertEquals("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertTrue(pool_add_to_aggregate in calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEquals("CONTEXT", context)
self.assertEquals("AGGREGATE", aggregate)
self.assertEquals("HOST", host)
self.assertEquals("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertTrue(pool_remove_from_aggregate in calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
    def test_join_slave(self):
        """Ensure join_slave gets called when the request gets to master."""
        def fake_join_slave(id, compute_uuid, host, url, user, password):
            fake_join_slave.called = True
        self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
        # The aggregate already has a master ('host', per self.fake_metadata),
        # so adding 'host2' must take the join-slave path.
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         dict(compute_uuid='fake_uuid',
                                         url='fake_url',
                                         user='fake_user',
                                         passwd='fake_pass',
                                         xenhost_uuid='fake_uuid'))
        self.assertTrue(fake_join_slave.called)
    def test_add_to_aggregate_first_host(self):
        """Adding the first host should relabel the existing XenAPI pool."""
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        # Pre-create a pool so there is something to rename.
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
        values = {"name": 'fake_aggregate',
                  "availability_zone": 'fake_zone'}
        result = db.aggregate_create(self.context, values)
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.CREATED}
        db.aggregate_metadata_add(self.context, result['id'], metadata)
        db.aggregate_host_add(self.context, result['id'], "host")
        aggregate = db.aggregate_get(self.context, result['id'])
        # Sanity-check the fixture before exercising the pool code.
        self.assertEqual(["host"], aggregate['hosts'])
        self.assertEqual(metadata, aggregate['metadetails'])
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
    def test_remove_from_empty_aggregate(self):
        """Removing a host that was never added must be rejected."""
        # The default setup only contains 'host'; 'test_host' is absent.
        result = self._aggregate_setup()
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, result, "test_host")
    def test_remove_slave(self):
        """Ensure eject slave gets called."""
        def fake_eject_slave(id, compute_uuid, host_uuid):
            fake_eject_slave.called = True
        self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
        # Register 'host2' in the pool metadata so removing it takes the
        # eject-slave path (the master is 'host').
        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_slave.called)
    def test_remove_master_solo(self):
        """Ensure metadata are cleared after removal."""
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
        aggregate = self._aggregate_setup(metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_clear_pool.called)
        # Only the pool flag and state survive; host/master entries are gone.
        self.assertThat({pool_states.POOL_FLAG: 'XenAPI',
                pool_states.KEY: pool_states.ACTIVE},
                matchers.DictMatches(result['metadetails']))
    def test_remote_master_non_empty_pool(self):
        """Ensure InvalidAggregateAction is raised if removing the master
        while slaves remain in the pool."""
        # NOTE(review): "remote" in the method name is presumably a typo for
        # "remove"; renaming would change test discovery, so it is kept.
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
values = {"name": aggr_name,
"availability_zone": aggr_zone}
result = db.aggregate_create(self.context, values)
pool_flag = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: aggr_state}
db.aggregate_metadata_add(self.context, result['id'], pool_flag)
for host in hosts:
db.aggregate_host_add(self.context, result['id'], host)
if metadata:
db.aggregate_metadata_add(self.context, result['id'], metadata)
return db.aggregate_get(self.context, result['id'])
    def test_add_host_to_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when adding host while
        aggregate is not ready."""
        # CHANGING: a pool operation is already in flight on the aggregate.
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'host')
    def test_add_host_to_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted."""
        # DISMISSED marks an aggregate whose pool was torn down.
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_add_host_to_aggregate_invalid_error_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        in error."""
        aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.add_to_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_remove_host_from_aggregate_error(self):
        """Ensure we can remove a host from an aggregate even if in error."""
        values = _create_service_entries(self.context)
        fake_zone = values.keys()[0]
        aggr = self.api.create_aggregate(self.context,
                'fake_aggregate', fake_zone)
        # let's mock the fact that the aggregate is ready!
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, aggr['id'], metadata)
        for host in values[fake_zone]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr['id'], host)
        # let's mock the fact that the aggregate is in error!
        # NOTE(review): `status` is built but never written to the aggregate
        # metadata, so the ERROR state is never actually set and this test
        # does not exercise the error path its name describes -- confirm
        # intent before relying on it.
        status = {'operational_state': pool_states.ERROR}
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr['id'],
                                                       values[fake_zone][0])
        # One host fewer, and the aggregate remains ACTIVE.
        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
        self.assertEqual(expected['metadata'][pool_states.KEY],
                         pool_states.ACTIVE)
    def test_remove_host_from_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        deleted."""
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')
    def test_remove_host_from_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateAction is raised when aggregate is
        changing."""
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
"""Ensure the undo operation works correctly on add."""
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, "fake_host",
aggregate=jsonutils.to_primitive(self.aggr))
excepted = db.aggregate_get(self.context, self.aggr['id'])
self.assertEqual(excepted['metadetails'][pool_states.KEY],
pool_states.ERROR)
self.assertEqual(excepted['hosts'], [])
class MockComputeAPI(object):
    """Test double for the compute RPC API that records incoming calls."""

    def __init__(self):
        # Each entry: (bound method, *call args) in invocation order.
        self._mock_calls = []

    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        """Record an add_aggregate_host RPC instead of performing it."""
        record = (self.add_aggregate_host, ctxt, aggregate,
                  host_param, host, slave_info)
        self._mock_calls.append(record)

    def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
                              host, slave_info):
        """Record a remove_aggregate_host RPC instead of performing it."""
        record = (self.remove_aggregate_host, ctxt, aggregate_id,
                  host_param, host, slave_info)
        self._mock_calls.append(record)
class StubDependencies(object):
    """Stub dependencies for ResourcePool"""
    def __init__(self):
        # Record RPCs instead of sending them.
        self.compute_rpcapi = MockComputeAPI()
    def _is_hv_pool(self, *_ignore):
        # Always treat the aggregate as a hypervisor pool.
        return True
    def _get_metadata(self, *_ignore):
        # Minimal pool metadata: a state entry plus the designated master.
        return {
            pool_states.KEY: {},
            'master_compute': 'master'
        }
    def _create_slave_info(self, *ignore):
        return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool whose collaborators are replaced by StubDependencies."""
class HypervisorPoolTestCase(test.TestCase):
    """ResourcePool should relay slave add/remove to the pool master via
    the compute RPC API."""
    # Aggregate fixture whose metadata names 'master' as the pool master.
    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadetails': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: {},
            pool_states.KEY: {}
            }
        }
    def test_slave_asks_master_to_add_slave_to_pool(self):
        # Adding a slave becomes an add_aggregate_host RPC to 'master'.
        slave = ResourcePoolWithStubs()
        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
        self.assertIn(
            (slave.compute_rpcapi.add_aggregate_host,
            "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
            "slave", "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)
    def test_slave_asks_master_to_remove_slave_from_pool(self):
        # Removal is sent by aggregate id (98), not by the full record.
        slave = ResourcePoolWithStubs()
        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
        self.assertIn(
            (slave.compute_rpcapi.remove_aggregate_host,
             "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
            slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.TestCase):
    """pool.swap_xapi_host should replace only the hostname part of a URL,
    preserving scheme, port and path."""
    def test_swapping(self):
        self.assertEquals(
            "http://otherserver:8765/somepath",
            pool.swap_xapi_host(
                "http://someserver:8765/somepath", 'otherserver'))
    def test_no_port(self):
        self.assertEquals(
            "http://otherserver/somepath",
            pool.swap_xapi_host(
                "http://someserver/somepath", 'otherserver'))
    def test_no_path(self):
        self.assertEquals(
            "http://otherserver",
            pool.swap_xapi_host(
                "http://someserver", 'otherserver'))
class VmUtilsTestCase(test.TestCase):
    """Unit tests for xenapi utils."""
    def test_upload_image(self):
        """upload_image should pass only the instance's own properties to
        the dom0 plugin -- not image_* system metadata (see `expected`)."""
        def fake_instance_system_metadata_get(context, uuid):
            # image_* entries that must NOT end up in the glance properties.
            return dict(image_a=1, image_b=2, image_c='c', d='d')
        def fake_get_sr_path(session):
            return "foo"
        class FakeInstance(dict):
            # Dict-backed instance; any missing key resolves to "whatever".
            def __init__(self):
                super(FakeInstance, self).__init__({
                    'auto_disk_config': 'auto disk config',
                    'os_type': 'os type'})
            def __missing__(self, item):
                return "whatever"
        class FakeSession(object):
            # Capture the kwargs handed to the plugin call for inspection.
            def call_plugin(session_self, service, command, kwargs):
                self.kwargs = kwargs
            def call_plugin_serialized(session_self, service, command, *args,
                                       **kwargs):
                self.kwargs = kwargs
        def fake_dumps(thing):
            # Identity stand-in for pickle.dumps so kwargs stay inspectable.
            return thing
        self.stubs.Set(db, "instance_system_metadata_get",
                       fake_instance_system_metadata_get)
        self.stubs.Set(vm_utils, "get_sr_path", fake_get_sr_path)
        self.stubs.Set(pickle, "dumps", fake_dumps)
        ctx = context.get_admin_context()
        instance = FakeInstance()
        session = FakeSession()
        vm_utils.upload_image(ctx, session, instance, "vmi uuids", "image id")
        actual = self.kwargs['properties']
        # Inheritance happens in another place, now
        expected = dict(auto_disk_config='auto disk config',
                        os_type='os type')
        self.assertEquals(expected, actual)
class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
    """Unit tests for live_migration."""
    def setUp(self):
        super(XenAPILiveMigrateTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host')
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.context = context.get_admin_context()
        xenapi_fake.create_local_pifs()
    def test_live_migration_calls_vmops(self):
        # The driver entry point must delegate to vmops.live_migrate.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_live_migrate(context, instance_ref, dest, post_method,
                              recover_method, block_migration, migrate_data):
            fake_live_migrate.called = True
        self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
        self.conn.live_migration(None, None, None, None, None)
        self.assertTrue(fake_live_migrate.called)
    def test_pre_live_migration(self):
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.conn.pre_live_migration(None, None, None, None)
    def test_post_live_migration_at_destination(self):
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.conn.post_live_migration_at_destination(None, None, None, None)
    def test_check_can_live_migrate_destination_with_block_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
        # Destination must report its SR ref and the session migrate data.
        expected = {'block_migration': True,
                    'migrate_data': {
                        'migrate_send_data': "fake_migrate_data",
                        'destination_sr_ref': 'asdf'
                        }
                    }
        result = self.conn.check_can_live_migrate_destination(self.context,
                                                              {'host': 'host'},
                                                              {}, {},
                                                              True, False)
        self.assertEqual(expected, result)
    def test_check_can_live_migrate_destination_block_migration_fails(self):
        # A failing XenAPI session should surface as MigrationError.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)
    def test_check_can_live_migrate_source_with_block_migrate(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass
        self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                              'destination_sr_ref': None,
                              'migrate_send_data': None
                           }}
        # Must complete without raising against a healthy session.
        self.conn.check_can_live_migrate_source(self.context,
                                                {'host': 'host'},
                                                dest_check_data)
    def test_check_can_live_migrate_source_with_block_migrate_fails(self):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass
        self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                              'destination_sr_ref': None,
                              'migrate_send_data': None
                           }}
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context,
                          {'host': 'host'},
                          dest_check_data)
    def test_check_can_live_migrate_works(self):
        # Non-block migration succeeds when an aggregate whose metadata
        # contains this host is found for CONF.host.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        class fake_aggregate:
            def __init__(self):
                self.metadetails = {"host": "test_host_uuid"}
        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [fake_aggregate()]
        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.conn.check_can_live_migrate_destination(self.context,
                                                     {'host': 'host'}, False, False)
    def test_check_can_live_migrate_fails(self):
        # Aggregate metadata lacking this host must yield MigrationError.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        class fake_aggregate:
            def __init__(self):
                self.metadetails = {"dest_other": "test_host_uuid"}
        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [fake_aggregate()]
        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'}, None, None)
    def test_live_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        def post_method(context, instance, destination_hostname,
                        block_migration):
            post_method.called = True
        # A successful migration must invoke the caller's post_method.
        self.conn.live_migration(self.conn, None, None, post_method, None)
        self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_on_failure(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # On failure the error propagates AND recover_method runs.
        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migration_calls_post_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass
        self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def post_method(context, instance, destination_hostname,
                        block_migration):
            post_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_with_block_migration_raises_invalid_param(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and no migrate data
        self.assertRaises(exception.InvalidParameterValue,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, None)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migration_with_block_migration_fails_migrate_send(self):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass
        self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
        self.assertRaises(exception.MigrationError,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, migrate_data)
        self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migrate_block_migration_xapi_call_parameters(self):
        # Capture the raw VM.migrate_send arguments at the fake-session level.
        fake_vdi_map = object()
        class Session(xenapi_fake.SessionBase):
            def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                                vdi_map, vif_map, options):
                self.assertEquals('SOMEDATA', migrate_data)
                self.assertEquals(fake_vdi_map, vdi_map)
        stubs.stubout_session(self.stubs, Session)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            return fake_vdi_map
        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def dummy_callback(*args, **kwargs):
            pass
        conn.live_migration(
            self.context, instance_ref=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration="SOMEDATA",
            migrate_data=dict(migrate_send_data='SOMEDATA',
                              destination_sr_ref="TARGET_SR_OPAQUE_REF"))
    def test_generate_vdi_map(self):
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = "fake_vm_ref"
        def fake_find_sr(_session):
            self.assertEquals(conn._session, _session)
            return "source_sr_ref"
        self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
        def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
            self.assertEquals(conn._session, _session)
            self.assertEquals(vm_ref, _vm_ref)
            self.assertEquals("source_sr_ref", _sr_ref)
            return ["vdi0", "vdi1"]
        self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
                       fake_get_instance_vdis_for_sr)
        # Every VDI on the source SR should map to the destination SR.
        result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
        self.assertEquals({"vdi0": "dest_sr_ref",
                           "vdi1": "dest_sr_ref"}, result)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
    """Metadata injection tests against an in-memory fake xenstore.

    self.xenstore mirrors the two stores the stubbed VMOps helpers write:
    'persist' (the param-xenstore) and 'ephem' (the live xenstore).
    """
    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(xenapi_connection_url='test_url',
                   xenapi_connection_password='test_pass',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.xenstore = dict(persist={}, ephem={})
        def fake_get_vm_opaque_ref(inst, instance):
            self.assertEqual(instance, 'instance')
            return 'vm_ref'
        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val
        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]
        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, 'instance')
            self.assertEqual(vm_ref, 'vm_ref')
            # The live store holds JSON-encoded values.
            self.xenstore['ephem'][path] = jsonutils.dumps(value)
        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, 'instance')
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]
        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)
    def test_inject_instance_metadata(self):
        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                                  # Check xenstore key sanitizing
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}])
        self.conn._vmops.inject_instance_metadata(instance, 'vm_ref')
        # Only user metadata lands in the persistent store; dots and
        # punctuation in keys are replaced with underscores.
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/hi_there': '4',
                    'vm-data/user-metadata/hi_t_e_e': '5',
                    },
                'ephem': {},
                })
    def test_change_instance_metadata_add(self):
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata('instance', diff)
        # '+' on a new key adds it to both stores.
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/test_key': '4',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '2',
                    'vm-data/user-metadata/c': '3',
                    'vm-data/user-metadata/test_key': '4',
                    },
                })
    def test_change_instance_metadata_update(self):
        diff = dict(b=['+', 4])
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata('instance', diff)
        # '+' on an existing key overwrites its value in both stores.
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '4',
                    'vm-data/user-metadata/c': '3',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/b': '4',
                    'vm-data/user-metadata/c': '3',
                    },
                })
    def test_change_instance_metadata_delete(self):
        diff = dict(b=['-'])
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata('instance', diff)
        # '-' removes the key from both stores.
        self.assertEqual(self.xenstore, {
                'persist': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/c': '3',
                    },
                'ephem': {
                    'vm-data/user-metadata/a': '1',
                    'vm-data/user-metadata/c': '3',
                    },
                })
class VMOpsTestCase(test.TestCase):
    def _get_mock_session(self, product_brand, product_version):
        """Build a bare session stand-in exposing only brand/version."""
        class Mock(object):
            pass
        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session
    def test_check_resize_func_name_defaults_to_VDI_resize(self):
        # With no product information the resize call falls back to
        # 'VDI.resize'.
        session = self._get_mock_session(None, None)
        ops = vmops.VMOps(session, fake.FakeVirtAPI())
        self.assertEquals(
            'VDI.resize',
            ops.check_resize_func_name())
class XenAPISessionTestCase(test.TestCase):
    def _get_mock_xapisession(self, software_version):
        """Build a XenAPISession that skips its real __init__ and reports
        the given software_version dict."""
        class XcpXapiSession(xenapi_conn.XenAPISession):
            def __init__(_ignore):
                "Skip the superclass's dirty init"
            def _get_software_version(_ignore):
                return software_version
        return XcpXapiSession()
    def test_get_product_version_product_brand_does_not_fail(self):
        # XCP reports no product_brand/product_version keys at all;
        # parsing must yield (None, None) rather than raising.
        session = self._get_mock_xapisession({
                    'build_number': '0',
                    'date': '2012-08-03',
                    'hostname': 'komainu',
                    'linux': '3.2.0-27-generic',
                    'network_backend': 'bridge',
                    'platform_name': 'XCP_Kronos',
                    'platform_version': '1.6.0',
                    'xapi': '1.3',
                    'xen': '4.1.2',
                    'xencenter_max': '1.10',
                    'xencenter_min': '1.10'
                    })
        self.assertEquals(
            (None, None),
            session._get_product_version_and_brand()
        )
    def test_get_product_version_product_brand_xs_6(self):
        # XenServer reports both keys; version parses into an int tuple.
        session = self._get_mock_xapisession({
                    'product_brand': 'XenServer',
                    'product_version': '6.0.50'
                    })
        self.assertEquals(
            ((6, 0, 50), 'XenServer'),
            session._get_product_version_and_brand()
        )
| {
"content_hash": "bea0504edfad936b3e9cf2dfc6349175",
"timestamp": "",
"source": "github",
"line_count": 3047,
"max_line_length": 79,
"avg_line_length": 42.51361995405317,
"alnum_prop": 0.5543735863330734,
"repo_name": "aristanetworks/arista-ovs-nova",
"id": "b226b34df5ec2106efee8270bd3719661ce4ac7e",
"size": "130204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/test_xenapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6938504"
},
{
"name": "Shell",
"bytes": "16524"
}
],
"symlink_target": ""
} |
import io
import unittest
from PyPDF2 import PdfFileReader
import frappe
import frappe.utils.pdf as pdfgen
class TestPdf(unittest.TestCase):
    """Tests for frappe.utils.pdf: wkhtmltopdf option parsing from HTML,
    PDF generation and password encryption."""

    @property
    def html(self):
        """HTML fixture with print-format margins and assorted link styles."""
        return """<style>
        .print-format {
            margin-top: 0mm;
            margin-left: 10mm;
            margin-right: 0mm;
        }
        </style>
        <p>This is a test html snippet</p>
        <div class="more-info">
            <a href="http://test.com">Test link 1</a>
            <a href="/about">Test link 2</a>
            <a href="login">Test link 3</a>
            <img src="/assets/frappe/test.jpg">
        </div>
        <div style="background-image: url('/assets/frappe/bg.jpg')">
            Please mail us at <a href="mailto:test@example.com">email</a>
        </div>"""

    def runTest(self):
        # Kept for callers that instantiate TestPdf() directly; normal
        # unittest discovery runs all test_* methods anyway.
        self.test_read_options_from_html()

    def test_read_options_from_html(self):
        """The mm margins in the embedded <style> become pdf options."""
        _, html_options = pdfgen.read_options_from_html(self.html)
        # IMPROVED: assertEqual instead of assertTrue(a == b) -- same
        # semantics, but failures now report both values.
        self.assertEqual(html_options['margin-top'], '0')
        self.assertEqual(html_options['margin-left'], '10')
        self.assertEqual(html_options['margin-right'], '0')

    def test_pdf_encryption(self):
        """A password option must yield an encrypted, decryptable PDF."""
        password = "qwe"
        pdf = pdfgen.get_pdf(self.html, options={"password": password})
        reader = PdfFileReader(io.BytesIO(pdf))
        self.assertTrue(reader.isEncrypted)
        self.assertTrue(reader.decrypt(password))

    def test_pdf_generation_as_a_user(self):
        """PDF generation should succeed with a concrete session user set."""
        frappe.set_user("Administrator")
        pdf = pdfgen.get_pdf(self.html)
        self.assertTrue(pdf)
| {
"content_hash": "58819965cf66352010ba7b2469a9e6ca",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 65,
"avg_line_length": 27.46,
"alnum_prop": 0.6766205389657683,
"repo_name": "almeidapaulopt/frappe",
"id": "f23db32845eff16bebb9e786b0faaec44567f415",
"size": "1470",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
} |
"""Quick analysis scratch script.

Plots a histogram of 10k standard-normal samples. The SQL string is kept
only for reference/pasting into a database shell -- it is never executed.
"""
import matplotlib
from numpy.random import randn
from matplotlib.pyplot import hist

# BUG FIX: `randn` and `hist` were bare names (pylab interactive-mode
# leftovers) and raised NameError when the file ran as a script; they are
# now imported explicitly from numpy and matplotlib.
x = randn(10000)
hist(x, 100)

# Example row-count query for the 2010 Q2 grid snapshot (not executed here).
sql_s = 'select count(*) from grid_quarters_public where quarter="2010_q2"'
| {
"content_hash": "df46aaaf8b5f44e7b56572e4f896ba05",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 24.2,
"alnum_prop": 0.743801652892562,
"repo_name": "tgebru/demographics_code",
"id": "3e095a521759443f704aaa8120b52b22e21239c9",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AspectJ",
"bytes": "7321"
},
{
"name": "Java",
"bytes": "2970270"
},
{
"name": "Matlab",
"bytes": "18118"
},
{
"name": "Python",
"bytes": "121"
},
{
"name": "XSLT",
"bytes": "4576"
}
],
"symlink_target": ""
} |
from ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_D, LargeMotor, MediumMotor
from ev3dev.auto import INPUT_1, INPUT_2, ColorSensor, UltrasonicSensor
import time
import ev3dev.ev3 as ev3
# Hardware wiring: ultrasonic sensor on input port 1, color sensor on
# port 2; claw on output B, drive motors on outputs A (left) and D (right).
ultrasonicSensor = UltrasonicSensor(INPUT_1)
colorSensor = ColorSensor(INPUT_2)
clawMotor = MediumMotor(OUTPUT_B)
leftTire = LargeMotor(OUTPUT_A)
rightTire = LargeMotor(OUTPUT_D)
def getUltrasonic():
    """Switch the ultrasonic sensor to cm distance mode and read it.

    NOTE(review): this returns ``ultrasonicSensor.units`` -- per the
    ev3dev API that is the unit string (e.g. 'cm'), not a measurement.
    Callers compare the result against numbers, so ``value()`` was
    probably intended; confirm against the ev3dev documentation.
    """
    ultrasonicSensor.mode='US-DIS-CM'
    return ultrasonicSensor.units
def getColor():
    """Switch the color sensor to reflected-light mode and return value().

    In 'COL-REFLECT' mode value() is the reflected light intensity,
    not a color code.
    """
    colorSensor.mode='COL-REFLECT'
    return colorSensor.value()
#def findObject():
# Approach phase: creep forward until the object is within 5.5 cm,
# then close the claw to grab it.
# BUG FIX: the loop conditions previously compared the FUNCTION OBJECTS
# (`getUltrasonic > 5.5`, `getColor > 15`) instead of calling them --
# always true on Python 2, TypeError on Python 3. The functions are now
# called. NOTE(review): getUltrasonic still returns the sensor's unit
# string rather than a distance; see its docstring.
while getUltrasonic() > 5.5:
    leftTire.run_timed(power=15, rotations=0.2)
    rightTire.run_timed(power=15, rotations=0.2)
    time.sleep(1)
clawMotor.run_timed(power=75, rotations=0.8)
time.sleep(1)
#def findTarget():
# Delivery phase: drive until a dark target (low reflectance) is seen,
# then open the claw to release.
while getColor() > 15:
    leftTire.run_timed(power=15, rotations=0.2)
    rightTire.run_timed(power=15, rotations=0.2)
    time.sleep(1)
clawMotor.run_timed(power=75, rotations=-0.8)
time.sleep(1) | {
"content_hash": "4c66d607f4a632158560414bf1e8d5bf",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 28.02857142857143,
"alnum_prop": 0.746177370030581,
"repo_name": "chandlercr/aima-python",
"id": "a2415761f5198be80892613f914660d22cfa4315",
"size": "981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "submissions/Kinley/myLegos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856"
},
{
"name": "HTML",
"bytes": "9947"
},
{
"name": "JavaScript",
"bytes": "10165"
},
{
"name": "Jupyter Notebook",
"bytes": "1382354"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "1917198"
}
],
"symlink_target": ""
} |
"""
Parsing functions for Binvox files.
https://www.patrickmin.com/binvox/binvox.html
Exporting meshes as binvox files requires the
`binvox` executable to be in your path.
"""
import os
import subprocess
import numpy as np
import collections
from .. import util
from ..base import Trimesh
# find the executable for binvox in PATH
binvox_encoder = util.which('binvox')
# Parsed binvox payload: run-length-encoded voxel bytes plus the grid
# shape, translation, and scale taken from the file header.
Binvox = collections.namedtuple(
    'Binvox', ['rle_data', 'shape', 'translate', 'scale'])
def parse_binvox_header(fp):
    """
    Read and validate the header of a binvox file.

    Spec available:
    https://www.patrickmin.com/binvox/binvox.html

    Parameters
    ------------
    fp : file-object
      File-like object positioned at the start of a binvox file;
      may be opened in binary or text mode.

    Returns
    ----------
    shape : tuple
      Voxel grid dimensions from the `dim` line.
    translate : tuple
      Translation from the `translate` line.
    scale : float
      Scale from the `scale` line.

    Raises
    ------------
    IOError
      If the stream does not start with the binvox magic line.
    """
    first = fp.readline().strip()
    # pick bytes or str constants depending on the stream mode
    if hasattr(first, 'decode'):
        magic, sep = b'#binvox', b' '
    else:
        magic, sep = '#binvox', ' '
    if not first.startswith(magic):
        raise IOError('Not a binvox file')
    # 'dim X Y Z' -> (X, Y, Z)
    shape = tuple(int(t) for t in fp.readline().strip().split(sep)[1:])
    # 'translate TX TY TZ' -> (TX, TY, TZ)
    translate = tuple(float(t) for t in fp.readline().strip().split(sep)[1:])
    # 'scale S' -> S
    scale = float(fp.readline().strip().split(sep)[1])
    # consume the terminating 'data' line so fp points at the payload
    fp.readline()
    return shape, translate, scale
def parse_binvox(fp, writeable=False):
    """
    Read a complete binvox file, spec at
    https://www.patrickmin.com/binvox/binvox.html

    Parameters
    ------------
    fp : file-object
      File-like object containing binvox data.
    writeable : bool
      If True, copy the payload so the returned array is writeable
      (`np.frombuffer` otherwise yields a read-only view).

    Returns
    ----------
    binvox : namedtuple
      `Binvox` containing run-length data, shape, translate and scale.

    Raises
    ------------
    IOError
      If the stream is not a valid binvox file.
    """
    # header gives the grid geometry; everything after it is RLE payload
    shape, translate, scale = parse_binvox_header(fp)
    payload = fp.read()
    rle = np.frombuffer(payload, dtype=np.uint8)
    if writeable:
        rle = rle.copy()
    return Binvox(rle, shape, translate, scale)
# Template for a binvox file header, including the terminating 'data'
# line; filled in by binvox_header() below.
_binvox_header = '''#binvox 1
dim {sx} {sy} {sz}
translate {tx} {ty} {tz}
scale {scale}
data
'''
def binvox_header(shape, translate, scale):
    """
    Build a binvox header string.

    Parameters
    --------
    shape : length-3 iterable of ints
      Voxel grid dimensions.
    translate : length-3 iterable of floats
      Grid translation.
    scale : number
      Length of the entire voxel grid.

    Returns
    --------
    header : str
      Header text including the terminating "data\\n" line.
    """
    dim_x, dim_y, dim_z = (int(v) for v in shape)
    off_x, off_y, off_z = translate
    template = '''#binvox 1
dim {sx} {sy} {sz}
translate {tx} {ty} {tz}
scale {scale}
data
'''
    return template.format(
        sx=dim_x, sy=dim_y, sz=dim_z,
        tx=off_x, ty=off_y, tz=off_z,
        scale=scale)
def binvox_bytes(rle_data, shape, translate=(0, 0, 0), scale=1):
    """Serialize run-length-encoded voxels to binary binvox data.

    Parameters
    --------
    rle_data : numpy array
      Run-length encoded values, must be uint8.
    shape : (3,) int
      Shape of voxel grid.
    translate : (3,) float
      Translation of voxels.
    scale : float
      Length of entire voxel grid.

    Returns
    --------
    data : bytes
      Header plus payload, suitable for writing to a binary file.

    Raises
    --------
    ValueError
      If `rle_data` is not uint8.
    """
    # binvox payloads are (value, count) byte pairs; enforce the dtype
    if rle_data.dtype != np.uint8:
        raise ValueError(
            "rle_data.dtype must be np.uint8, got %s" % rle_data.dtype)
    header = binvox_header(shape, translate, scale).encode()
    payload = rle_data.tobytes()
    return header + payload
def voxel_from_binvox(
        rle_data, shape, translate=None, scale=1.0, axis_order='xzy'):
    """
    Factory for building a VoxelGrid from binvox file data.

    Parameters
    ---------
    rle_data : numpy array or trimesh.rle.RunLengthEncoding
      Run-length-encoded flat voxel values; see `trimesh.rle`
      documentation for a description of the encoding.
    shape : (3,) int
      Shape of voxel grid.
    translate : (3,) float
      Translation of voxels.
    scale : float
      Length of entire voxel grid.
    axis_order : str or None
      One of 'xzy' (the binvox on-disk default), 'xyz', or None
      (treated as 'xyz'), giving the order of axes in the encoded data.

    Returns
    ---------
    result : VoxelGrid
      Loaded voxels.

    Raises
    ---------
    ValueError
      If `axis_order` is not one of None, 'xyz', 'xzy'.
    """
    # deferred imports avoid a circular dependency at module load time
    from ..voxel import encoding as enc
    from ..voxel.base import VoxelGrid
    from .. import transformations
    if isinstance(rle_data, enc.RunLengthEncoding):
        encoding = rle_data
    else:
        encoding = enc.RunLengthEncoding(rle_data, dtype=bool)
    # shape must be uniform else scale is ambiguous
    transform = transformations.scale_and_translate(
        scale=scale / (np.array(shape) - 1),
        translate=translate)
    if axis_order == 'xzy':
        # binvox stores data x-z-y; permute back to x-y-z
        perm = (0, 2, 1)
        shape = tuple(shape[p] for p in perm)
        encoding = encoding.reshape(shape).transpose(perm)
    elif axis_order is None or axis_order == 'xyz':
        encoding = encoding.reshape(shape)
    else:
        # BUG FIX: the original message contained a '%s' placeholder but
        # never interpolated axis_order into it
        raise ValueError(
            "Invalid axis_order '%s': must be None, 'xyz' or 'xzy'"
            % axis_order)
    assert encoding.shape == shape
    return VoxelGrid(encoding, transform)
def load_binvox(file_obj,
                resolver=None,
                axis_order='xzy',
                file_type=None):
    """
    Load a trimesh `VoxelGrid` instance from a binvox file object.

    Parameters
    -----------
    file_obj : file-like object
      Contains binvox data.
    resolver : unused
    axis_order : str
      Order of axes in the encoded data; binvox default is 'xzy',
      'xyz' may be faster where ordering is not relevant.
    file_type : str or None
      Must be None or 'binvox' if provided.

    Returns
    ---------
    result : trimesh.voxel.VoxelGrid
      Loaded voxel data.
    """
    # reject any explicitly-passed type other than binvox
    if file_type not in (None, 'binvox'):
        raise ValueError(
            'file_type must be None or binvox, got %s' % file_type)
    parsed = parse_binvox(file_obj, writeable=True)
    return voxel_from_binvox(
        rle_data=parsed.rle_data,
        shape=parsed.shape,
        translate=parsed.translate,
        scale=parsed.scale,
        axis_order=axis_order)
def export_binvox(voxel, axis_order='xzy'):
    """
    Export a `trimesh.voxel.VoxelGrid` instance to binvox bytes.

    Parameters
    ------------
    voxel : `trimesh.voxel.VoxelGrid`
      Assumed to be in 'xyz' axis order.
    axis_order : str
      Elements in ('xyz', 'xzy'); order of axes to encode data
      ('xzy' is the binvox standard).

    Returns
    -----------
    result : bytes
      Representation according to binvox spec.

    Raises
    -----------
    ValueError
      If the grid scale is not uniform, or axis_order is invalid.
    """
    translation = voxel.translation
    per_axis = voxel.scale * (np.array(voxel.shape) - 1)
    # flip any axis with negative extent so all scales are positive
    flipped, = np.where(per_axis < 0)
    enc = voxel.encoding.flip(flipped)
    per_axis = np.abs(per_axis)
    # binvox supports only a single uniform scale
    if not util.allclose(per_axis[0], per_axis[1:],
                         1e-6 * per_axis[0] + 1e-8):
        raise ValueError('Can only export binvox with uniform scale')
    uniform = per_axis[0]
    if axis_order == 'xzy':
        enc = enc.transpose((0, 2, 1))
    elif axis_order != 'xyz':
        raise ValueError('Invalid axis_order: must be one of ("xyz", "xzy")')
    rle = enc.flat.run_length_data(dtype=np.uint8)
    return binvox_bytes(
        rle, shape=voxel.shape, translate=translation, scale=uniform)
class Binvoxer(object):
    """
    Interface for binvox CL tool.
    This class is responsible purely for making calls to the CL tool. It
    makes no attempt to integrate with the rest of trimesh at all.
    Constructor args configure command line options.
    `Binvoxer.__call__` operates on the path to a model file.
    If using this interface in published works, please cite the references
    below.
    See CL tool website for further details.
    https://www.patrickmin.com/binvox/
    @article{nooruddin03,
      author = {Fakir S. Nooruddin and Greg Turk},
      title = {Simplification and Repair of Polygonal Models Using Volumetric
               Techniques},
      journal = {IEEE Transactions on Visualization and Computer Graphics},
      volume = {9},
      number = {2},
      pages = {191--205},
      year = {2003}
    }
    @Misc{binvox,
      author = {Patrick Min},
      title = {binvox},
      howpublished = {{\tt http://www.patrickmin.com/binvox} or
                      {\tt https://www.google.com/search?q=binvox}},
      year = {2004 - 2019},
      note = {Accessed: yyyy-mm-dd}
    }
    """
    # input extensions accepted by the CL tool
    # NOTE(review): 'dfx' may be a transposition of 'dxf' -- confirm
    # against the binvox binary before changing, since __call__ validates
    # model extensions against this tuple
    SUPPORTED_INPUT_TYPES = (
        'ug',
        'obj',
        'off',
        'dfx',
        'xgl',
        'pov',
        'brep',
        'ply',
        'jot',
    )
    # output formats selectable via the '-t' flag
    SUPPORTED_OUTPUT_TYPES = (
        'binvox',
        'hips',
        'mira',
        'vtk',
        'raw',
        'schematic',
        'msh',
    )
    def __init__(
            self,
            dimension=32,
            file_type='binvox',
            z_buffer_carving=True,
            z_buffer_voting=True,
            dilated_carving=False,
            exact=False,
            bounding_box=None,
            remove_internal=False,
            center=False,
            rotate_x=0,
            rotate_z=0,
            wireframe=False,
            fit=False,
            block_id=None,
            use_material_block_id=False,
            use_offscreen_pbuffer=True,
            downsample_factor=None,
            downsample_threshold=None,
            verbose=False,
            binvox_path=None):
        """
        Configure the voxelizer.
        Parameters
        ------------
        dimension: voxel grid size (max 1024 when not using exact)
        file_type: str
          Output file type, supported types are:
            'binvox'
            'hips'
            'mira'
            'vtk'
            'raw'
            'schematic'
            'msh'
        z_buffer_carving : use z buffer based carving. At least one of
          `z_buffer_carving` and `z_buffer_voting` must be True.
        z_buffer_voting: use z-buffer based parity voting method.
        dilated_carving: stop carving 1 voxel before intersection.
        exact: any voxel with part of a triangle gets set. Does not use
          graphics card.
        bounding_box: 6-element float list/tuple of min, max values,
          (minx, miny, minz, maxx, maxy, maxz)
        remove_internal: remove internal voxels if True. Note there is some odd
          behaviour if boundary voxels are occupied.
        center: center model inside unit cube.
        rotate_x: number of 90 degree ccw rotations around x-axis before
          voxelizing.
        rotate_z: number of 90 degree cw rotations around z-axis before
          voxelizing.
        wireframe: also render the model in wireframe (helps with thin parts).
        fit: only write voxels in the voxel bounding box.
        block_id: when converting to schematic, use this as the block ID.
        use_material_block_id: when converting from obj to schematic, parse
          block ID from material spec "usemtl blockid_<id>" (ids 1-255 only).
        use_offscreen_pbuffer: use offscreen pbuffer instead of onscreen
          window.
        downsample_factor: downsample voxels by this factor in each dimension.
          Must be a power of 2 or None. If not None/1 and `core dumped`
          errors occur, try slightly adjusting dimensions.
        downsample_threshold: when downsampling, destination voxel is on if
          more than this number of voxels are on.
        verbose : bool
          If False, silences stdout/stderr from subprocess call.
        binvox_path : str
          Path to binvox executable. The default looks for an
          executable called `binvox` on your `PATH`.
        Raises
        ------------
        IOError
          If no binvox executable can be located.
        ValueError
          If an option combination is invalid.
        """
        # fall back to the executable discovered on PATH at import time
        if binvox_path is None:
            encoder = binvox_encoder
        else:
            encoder = binvox_path
        if encoder is None:
            raise IOError(' '.join([
                'No `binvox_path` provided and no binvox executable found',
                'on PATH, please go to https://www.patrickmin.com/binvox/ and',
                'download the appropriate version.']))
        # NOTE(review): this check fires when NOT using exact, but the
        # message says "using exact" -- the wording looks inverted relative
        # to the condition; confirm intent before changing either.
        if dimension > 1024 and not exact:
            raise ValueError(
                'Maximum dimension using exact is 1024, got %d' % dimension)
        if file_type not in Binvoxer.SUPPORTED_OUTPUT_TYPES:
            raise ValueError(
                'file_type %s not in set of supported output types %s' %
                (file_type, str(Binvoxer.SUPPORTED_OUTPUT_TYPES)))
        # assemble the command-line argument list once; __call__ only
        # swaps in the model path at the end
        args = [encoder, '-d', str(dimension), '-t', file_type]
        if exact:
            args.append('-e')
        # carving/voting flags: both on means neither flag is passed
        # (the tool's default); exactly one on selects '-c' or '-v'
        if z_buffer_carving:
            if z_buffer_voting:
                pass
            else:
                args.append('-c')
        elif z_buffer_voting:
            args.append('-v')
        else:
            raise ValueError(
                'One of `z_buffer_carving` or `z_buffer_voting` must be True')
        if dilated_carving:
            args.append('-dc')
        # Additional parameters
        if bounding_box is not None:
            if len(bounding_box) != 6:
                raise ValueError('bounding_box must have 6 elements')
            args.append('-bb')
            args.extend(str(b) for b in bounding_box)
        if remove_internal:
            args.append('-ri')
        if center:
            args.append('-cb')
        # rotations are expressed by repeating the flag once per 90 degrees
        args.extend(('-rotx',) * rotate_x)
        args.extend(('-rotz',) * rotate_z)
        if wireframe:
            args.append('-aw')
        if fit:
            args.append('-fit')
        if block_id is not None:
            args.extend(('-bi', block_id))
        if use_material_block_id:
            args.append('-mb')
        if use_offscreen_pbuffer:
            args.append('-pb')
        if downsample_factor is not None:
            # '-down' halves resolution; repeat it log2(factor) times
            times = np.log2(downsample_factor)
            if int(times) != times:
                raise ValueError(
                    'downsample_factor must be a power of 2, got %d'
                    % downsample_factor)
            args.extend(('-down',) * int(times))
        if downsample_threshold is not None:
            args.extend(('-dmin', str(downsample_threshold)))
        # placeholder: __call__ replaces this final element with the
        # actual model path before invoking the subprocess
        args.append('PATH')
        self._args = args
        self._file_type = file_type
        self.verbose = verbose
    @property
    def file_type(self):
        """Output file type this voxelizer produces (e.g. 'binvox')."""
        return self._file_type
    def __call__(self, path, overwrite=False):
        """
        Create a voxel file in the same directory as model at `path`.
        Parameters
        ------------
        path: string path to model file. Supported types:
            'ug'
            'obj'
            'off'
            'dfx'
            'xgl'
            'pov'
            'brep'
            'ply'
            'jot' (polygongs only)
        overwrite: if False and the output path (head.file_type) already
            exists, an IOError is raised; if True any existing file is
            handed to the binvox tool to overwrite.
        Returns
        ------------
        string path to voxel file. File type given by file_type in constructor.
        """
        head, ext = os.path.splitext(path)
        ext = ext[1:].lower()
        if ext not in Binvoxer.SUPPORTED_INPUT_TYPES:
            raise ValueError(
                'file_type %s not in set of supported input types %s' %
                (ext, str(Binvoxer.SUPPORTED_INPUT_TYPES)))
        out_path = '%s.%s' % (head, self._file_type)
        if os.path.isfile(out_path) and not overwrite:
            raise IOError('Attempted to voxelize object at existing path')
        # swap the trailing 'PATH' placeholder for the real model path
        self._args[-1] = path
        # generalizes to python2 and python3
        # will capture terminal output into variable rather than printing
        verbosity = subprocess.check_output(self._args)
        # if requested print ourselves
        if self.verbose:
            print(verbosity)
        return out_path
def voxelize_mesh(mesh,
                  binvoxer=None,
                  export_type='off',
                  **binvoxer_kwargs):
    """
    Voxelize a Trimesh object via the binvox command-line tool.

    The mesh is exported to a temporary file of `export_type`, the
    binvox tool is invoked on it (using either the supplied `binvoxer`
    or one built from `binvoxer_kwargs`), and the resulting binvox
    file is loaded back.

    Parameters
    ------------
    mesh : Trimesh
      Object to voxelize.
    binvoxer : Binvoxer or None
      Optional pre-configured Binvoxer instance.
    export_type : str
      File type the mesh is temporarily exported as.
    **binvoxer_kwargs
      Kwargs for creating a new Binvoxer instance; must be empty
      when `binvoxer` is provided.

    Returns
    ------------
    `VoxelGrid` object resulting.
    """
    if not isinstance(mesh, Trimesh):
        raise ValueError('mesh must be Trimesh instance, got %s' % str(mesh))
    if binvoxer is None:
        binvoxer = Binvoxer(**binvoxer_kwargs)
    elif len(binvoxer_kwargs) > 0:
        raise ValueError('Cannot provide binvoxer and binvoxer_kwargs')
    if binvoxer.file_type != 'binvox':
        raise ValueError(
            'Only "binvox" binvoxer `file_type` currently supported')
    with util.TemporaryDirectory() as folder:
        # write the mesh where the CL tool can read it
        model_path = os.path.join(folder, 'model.%s' % export_type)
        with open(model_path, 'wb') as fp:
            mesh.export(fp, file_type=export_type)
        voxel_path = binvoxer(model_path)
        # load the voxelized result back before the tempdir is removed
        with open(voxel_path, 'rb') as fp:
            grid = load_binvox(fp)
    return grid
# loader registry: file extension -> load function
_binvox_loaders = {'binvox': load_binvox}
| {
"content_hash": "e35379441715fd72431aae8fe8073c77",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 79,
"avg_line_length": 30.694300518134714,
"alnum_prop": 0.5804636506864731,
"repo_name": "mikedh/trimesh",
"id": "ec3569f6e3d8674771877fcc35069577a933d8cc",
"size": "17772",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "trimesh/exchange/binvox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "5887"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "2142314"
},
{
"name": "Shell",
"bytes": "5161"
}
],
"symlink_target": ""
} |
"""Unit tests for Deployment methods."""
import github3
from .helper import UnitIteratorHelper, create_url_helper, create_example_data_helper
# helper producing full URLs beneath the example deployment resource
url_for = create_url_helper(
    'https://api.github.com/repos/octocat/example/deployments/1'
)
# canned deployment JSON used to construct the instance under test
get_repo_example_data = create_example_data_helper('repos_deployment_example')
example_data = get_repo_example_data()
class TestDeploymentIterators(UnitIteratorHelper):
    """Test Deployment methods that return iterators."""
    described_class = github3.repos.deployment.Deployment
    example_data = example_data
    def test_statuses(self):
        """Test the request to retrieve a deployment's statuses."""
        i = self.instance.statuses()
        self.get_next(i)
        # advancing the iterator should hit the statuses URL exactly once,
        # with the default pagination parameters
        self.session.get.assert_called_once_with(
            url_for('statuses'),
            params={'per_page': 100},
            headers={}
        )
| {
"content_hash": "fd9d08340bb8d3f006b96956c6e52464",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 28.29032258064516,
"alnum_prop": 0.6795895096921323,
"repo_name": "degustaf/github3.py",
"id": "d2a0d22941e15b692ddfaa03e52fb1c92443870d",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/test_repos_deployment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "686961"
}
],
"symlink_target": ""
} |
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.data.experimental` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@Optional
@@RandomDataset
@@Reducer
@@SqlDataset
@@TFRecordWriter
@@bucket_by_sequence_length
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@rejection_resample
@@sample_from_datasets
@@scan
@@set_stats_aggregator
@@shuffle_and_repeat
@@StatsAggregator
@@unbatch
@@unique
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
# Optimization constant that can be used to enable auto-tuning.
from tensorflow.python.data.experimental.ops.optimization import AUTOTUNE
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_ops import set_stats_aggregator
from tensorflow.python.data.experimental.ops.stats_ops import StatsAggregator
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# strip module members not whitelisted via the @@ entries in the docstring
remove_undocumented(__name__)
| {
"content_hash": "70b88812bdc1e6ab6fb11764acc688d6",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 92,
"avg_line_length": 44.36842105263158,
"alnum_prop": 0.8348754448398576,
"repo_name": "snnn/tensorflow",
"id": "2ac159d38aafdc07f2c8725780448c85c67e3ec4",
"size": "4904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "339398"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49741628"
},
{
"name": "CMake",
"bytes": "195409"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254047"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867093"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58612"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41593453"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "476832"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import argparse
import base64
import httplib2
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
class AnnotationResult(object):
    """Wrapper around a Vision API text-annotation JSON response.

    Call `Parse()` once; on success the accessor methods expose the
    aggregate bounding box, text, and the per-token boxes and strings.
    """

    def __init__(self, json_response):
        self.json_response = json_response
        self._parsed = False

    def __str__(self):
        return str(self.json_response)

    def Parse(self):
        """Extract annotation fields; return True on success, else False."""
        if not self.json_response:
            return False
        responses = self.json_response.get('responses') or []
        if not responses:
            return False
        annotations = responses[0].get('textAnnotations') or []
        if not annotations:
            return False
        # first entry aggregates the whole image; the rest are tokens
        self.token_count = len(annotations) - 1
        self.all_annotation = annotations[0]
        description = self.all_annotation.get('description')
        if not description or 'boundingPoly' not in self.all_annotation:
            return False
        self.all_bounding_box = self.BoundingBoxJson2Set(
            self.all_annotation['boundingPoly']['vertices'])
        self.all_text = description
        tokens = annotations[1:]
        self.sub_bounding_boxes = [
            self.BoundingBoxJson2Set(token['boundingPoly']['vertices'])
            for token in tokens
        ]
        self.sub_texts = [token['description'] for token in tokens]
        self._parsed = True
        return True

    def GetBoundingBox(self):
        assert self._parsed
        return self.all_bounding_box

    def GetSubBoundingBoxes(self):
        assert self._parsed
        return self.sub_bounding_boxes

    def GetSubTexts(self):
        assert self._parsed
        return self.sub_texts

    def GetSubBoxTextMap(self):
        assert self._parsed
        return dict(zip(self.sub_bounding_boxes, self.sub_texts))

    @staticmethod
    def BoundingBoxJson2Set(json_box):
        """Collapse a 4-vertex polygon to an (x0, y0, x1, y1) tuple,
        taking opposite corners from vertices 0 and 2."""
        first, opposite = json_box[0], json_box[2]
        return (first['x'], first['y'], opposite['x'], opposite['y'])
class TextAnnotator(object):
  """Thin client for the Google Cloud Vision v1 TEXT_DETECTION feature."""
  # The url template to retrieve the discovery document for trusted testers.
  DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
  def __init__(self):
    # Application Default Credentials: assumes the environment is
    # configured (e.g. GOOGLE_APPLICATION_CREDENTIALS) -- verify locally.
    self.credentials = GoogleCredentials.get_application_default()
    self.service = discovery.build(
        'vision',
        'v1',
        credentials=self.credentials,
        discoveryServiceUrl=TextAnnotator.DISCOVERY_URL)
  def GetTextAnnotations(self, image_file):
    """Run TEXT_DETECTION on the image at `image_file`.

    Returns an AnnotationResult wrapping the raw JSON response;
    callers must invoke `.Parse()` on it before using the accessors.
    """
    with open(image_file, 'rb') as image:
      # the REST API expects base64-encoded image bytes
      image_content = base64.b64encode(image.read())
    service_request = self.service.images().annotate(body={
        'requests': [{
            'image': {
                'content': image_content.decode('UTF-8')
            },
            'features': [{
                'type': 'TEXT_DETECTION',
                'maxResults': 1
            }]
        }]
    })
    response = AnnotationResult(service_request.execute())
    return response
def main():
  """Annotate a sample image and print its text bounding boxes."""
  annotator = TextAnnotator()
  # hard-coded sample image; swap in argparse if batch use is needed
  image_file = '../data/two_line.png'
  annotation = annotator.GetTextAnnotations(image_file)
  if annotation.Parse():
    print(annotation.GetBoundingBox())
    print(annotation.GetSubBoundingBoxes())


if __name__ == '__main__':
  main()
| {
"content_hash": "dab1f1542dbe0ec10bc90e7d0a84c297",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 85,
"avg_line_length": 30.515151515151516,
"alnum_prop": 0.641261171797418,
"repo_name": "wenjiesha/menu_picture",
"id": "f7e3736cffe3dac77c7f759f3bd156eb1087d7eb",
"size": "4028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ocr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "5582"
},
{
"name": "Python",
"bytes": "11206"
}
],
"symlink_target": ""
} |
"""Tests for the elpy.impmagic module"""
import re
import sys
import unittest
from elpy import impmagic
from elpy.tests.support import BackendTestCase
# Fixture source used by the tests below: `logging` is imported but
# unused, and `os` is used but never imported, exercising both the
# import-removal and unresolved-symbol paths.
TEST_SOURCE = '''# test file
import time
import logging
os.getcwd()
time.sleep(1)
'''
@unittest.skipIf(sys.version_info >= (3, 5), "importmagic fails in 3.5")
class ImportMagicTestCase(BackendTestCase):
    """Exercise ImportMagic indexing, symbol lookup and import rewriting."""
    def setUp(self):
        # importmagic is an optional dependency; skip the whole case
        # when it is not installed
        if not impmagic.importmagic:
            raise unittest.SkipTest
        self.importmagic = impmagic.ImportMagic()
        super(ImportMagicTestCase, self).setUp()
    def build_index(self):
        # Index a one-module project; indexing runs on a background
        # thread, so join before asserting against the index.
        self.project_file('mymod.py', 'class AnUncommonName:\n pass\n')
        self.importmagic.build_index(self.project_root,
                                     custom_path=[self.project_root],
                                     blacklist_re=re.compile('^$'))
        self.importmagic._thread.join()
    def test_get_symbols(self):
        self.build_index()
        candidates = self.importmagic.get_import_symbols('AnUncommonName')
        self.assertEqual(candidates, ['from mymod import AnUncommonName'])
        candidates = self.importmagic.get_import_symbols('mymod')
        self.assertEqual(candidates, ['import mymod'])
    def test_add_import(self):
        self.build_index()
        # lines 2-5 of TEST_SOURCE are the import block being rewritten
        start, end, newblock = self.importmagic.add_import(
            TEST_SOURCE, 'from mymod import AnUncommonName')
        self.assertEqual(start, 2)
        self.assertEqual(end, 5)
        self.assertEqual(newblock.strip(),
                         'import logging\n'
                         'import time\n'
                         '\n'
                         'from mymod import AnUncommonName')
        start, end, newblock = self.importmagic.add_import(
            TEST_SOURCE, 'import mymod')
        self.assertEqual(start, 2)
        self.assertEqual(end, 5)
        self.assertEqual(newblock.strip(),
                         'import logging\nimport time\n\nimport mymod')
    def test_get_unresolved_symbols(self):
        self.build_index()
        symbols = self.importmagic.get_unresolved_symbols('x = a + b\ny = c.d')
        self.assertEqual(sorted(symbols), ['a', 'b', 'c.d'])
    def test_remove_unreferenced_imports(self):
        self.build_index()
        # `logging` is never referenced in TEST_SOURCE, so only
        # `import time` should survive in the rewritten block
        start, end, newblock = \
            self.importmagic.remove_unreferenced_imports(TEST_SOURCE)
        self.assertEqual(start, 2)
        self.assertEqual(end, 5)
        self.assertEqual(newblock.strip(), 'import time')
| {
"content_hash": "0aa7961249f7d0f62f2dfdaea176526d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 34.55555555555556,
"alnum_prop": 0.6097266881028939,
"repo_name": "DaVinci789/.emacs.d",
"id": "97c6f86b0604afd0f39d93654be6df5c4cb2c851",
"size": "2505",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "elpa/elpy-20160131.118/elpy/tests/test_impmagic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "313549"
},
{
"name": "C++",
"bytes": "10287"
},
{
"name": "Common Lisp",
"bytes": "1254182"
},
{
"name": "Emacs Lisp",
"bytes": "20412026"
},
{
"name": "Groff",
"bytes": "737459"
},
{
"name": "HTML",
"bytes": "184"
},
{
"name": "Makefile",
"bytes": "19042"
},
{
"name": "NewLisp",
"bytes": "690"
},
{
"name": "Perl",
"bytes": "4466"
},
{
"name": "Python",
"bytes": "170938"
},
{
"name": "Ruby",
"bytes": "7652"
},
{
"name": "Scheme",
"bytes": "134949"
},
{
"name": "Shell",
"bytes": "5210"
},
{
"name": "Standard ML",
"bytes": "9321"
},
{
"name": "TeX",
"bytes": "127633"
}
],
"symlink_target": ""
} |
import json
import math
import numpy as np
import pandas as pd
import pytest
from scipy.sparse import csr_matrix, csc_matrix
from mlflow.models.signature import infer_signature
from mlflow.models.utils import (
_Example,
_read_tensor_input_from_json,
_read_sparse_matrix_from_json,
)
from mlflow.types.utils import TensorsNotSupportedException
from mlflow.utils.file_utils import TempDir
from mlflow.utils.proto_json_utils import dataframe_from_raw_json
@pytest.fixture
def pandas_df_with_all_types():
    """DataFrame with one column per supported scalar/extension dtype."""
    columns = {
        "boolean": [True, False, True],
        "integer": np.array([1, 2, 3], np.int32),
        "long": np.array([1, 2, 3], np.int64),
        "float": np.array([math.pi, 2 * math.pi, 3 * math.pi], np.float32),
        "double": [math.pi, 2 * math.pi, 3 * math.pi],
        "binary": [bytes([1, 2, 3]), bytes([4, 5, 6]), bytes([7, 8, 9])],
        "string": ["a", "b", "c"],
        "boolean_ext": [True, False, True],
        "integer_ext": [1, 2, 3],
        "string_ext": ["a", "b", "c"],
        "array": np.array(["a", "b", "c"]),
    }
    frame = pd.DataFrame(columns)
    # promote the *_ext columns to pandas extension dtypes
    for name, dtype in (("boolean_ext", "boolean"),
                        ("integer_ext", "Int64"),
                        ("string_ext", "string")):
        frame[name] = frame[name].astype(dtype)
    return frame
@pytest.fixture
def df_with_nan():
    """Typed DataFrame whose float columns contain NaN entries."""
    pi = math.pi
    columns = {
        "boolean": [True, False, True],
        "integer": np.array([1, 2, 3], np.int32),
        "long": np.array([1, 2, 3], np.int64),
        "float": np.array([np.nan, 2 * pi, 3 * pi], np.float32),
        "double": [pi, np.nan, 3 * pi],
        "binary": [bytes([1, 2, 3]), bytes([4, 5, 6]), bytes([7, 8, 9])],
        "string": ["a", "b", "c"],
    }
    return pd.DataFrame(columns)
@pytest.fixture
def dict_of_ndarrays():
    """Independent arrays of rank 1-4 built from the same 24-value ramp."""
    def ramp(*shape):
        values = np.arange(0, 12, 0.5)
        return values.reshape(shape) if shape else values
    return {
        "1D": ramp(),
        "2D": ramp(3, 8),
        "3D": ramp(2, 3, 4),
        "4D": ramp(3, 2, 2, 2),
    }
@pytest.fixture
def dict_of_ndarrays_with_nans():
    """Float arrays of rank 1-3, each containing NaN entries."""
    nan = np.nan
    return {
        "1D": np.array([0.5, nan, 2.0]),
        "2D": np.array([[0.1, 0.2], [nan, 0.5]]),
        "3D": np.array([[[0.1, nan], [0.3, 0.4]], [[nan, 0.6], [0.7, nan]]]),
    }
@pytest.fixture
def dict_of_sparse_matrix():
    """The same 3x8 ramp stored as CSC and CSR sparse matrices."""
    dense = np.arange(0, 12, 0.5).reshape(3, 8)
    return {
        "sparse_matrix_csc": csc_matrix(dense),
        "sparse_matrix_csr": csr_matrix(dense),
    }
def test_input_examples(pandas_df_with_all_types, dict_of_ndarrays):
    """Input examples round-trip through save/load for every supported input
    kind: DataFrame, dict of ndarrays, single ndarray, and dict of scalars."""
    sig = infer_signature(pandas_df_with_all_types)
    # test setting example with data frame with all supported data types
    with TempDir() as tmp:
        example = _Example(pandas_df_with_all_types)
        example.save(tmp.path())
        filename = example.info["artifact_path"]
        with open(tmp.path(filename), "r") as f:
            data = json.load(f)
            # Saved examples use the pandas split orientation.
            assert set(data.keys()) == {"columns", "data"}
        parsed_df = dataframe_from_raw_json(tmp.path(filename), schema=sig.inputs)
        assert (pandas_df_with_all_types == parsed_df).all().all()
        # the frame read without schema should match except for the binary values
        assert (
            (
                parsed_df.drop(columns=["binary"])
                == dataframe_from_raw_json(tmp.path(filename)).drop(columns=["binary"])
            )
            .all()
            .all()
        )
    # NB: Drop columns that cannot be encoded by proto_json_utils.NumpyEncoder
    new_df = pandas_df_with_all_types.drop(columns=["boolean_ext", "integer_ext", "string_ext"])
    # pass the input as dictionary instead
    with TempDir() as tmp:
        d = {name: new_df[name].values for name in new_df.columns}
        example = _Example(d)
        example.save(tmp.path())
        filename = example.info["artifact_path"]
        parsed_dict = _read_tensor_input_from_json(tmp.path(filename))
        assert d.keys() == parsed_dict.keys()
        # Asserting binary will fail since it is converted to base64 encoded strings.
        # The check above suffices that the binary input is stored.
        del d["binary"]
        for key in d:
            np.testing.assert_array_equal(d[key], parsed_dict[key])
    # input passed as numpy array
    new_df = pandas_df_with_all_types.drop(columns=["binary"])
    for col in new_df:
        input_example = new_df[col].to_numpy()
        with TempDir() as tmp:
            example = _Example(input_example)
            example.save(tmp.path())
            filename = example.info["artifact_path"]
            parsed_ary = _read_tensor_input_from_json(tmp.path(filename))
            np.testing.assert_array_equal(parsed_ary, input_example)
    # pass multidimensional array
    for col in dict_of_ndarrays:
        input_example = dict_of_ndarrays[col]
        with TempDir() as tmp:
            example = _Example(input_example)
            example.save(tmp.path())
            filename = example.info["artifact_path"]
            parsed_ary = _read_tensor_input_from_json(tmp.path(filename))
            np.testing.assert_array_equal(parsed_ary, input_example)
    # pass multidimensional array as a list -- lists of tensors are rejected
    example = np.array([[1, 2, 3]])
    with pytest.raises(TensorsNotSupportedException, match=r"Row '0' has shape \(1, 3\)"):
        _Example([example, example])
    # pass dict with scalars
    with TempDir() as tmp:
        example = {"a": 1, "b": "abc"}
        x = _Example(example)
        x.save(tmp.path())
        filename = x.info["artifact_path"]
        parsed_df = dataframe_from_raw_json(tmp.path(filename))
        assert example == parsed_df.to_dict(orient="records")[0]
def test_sparse_matrix_input_examples(dict_of_sparse_matrix):
    """Sparse matrix input examples survive a save/load round trip unchanged."""
    for matrix_format, matrix in dict_of_sparse_matrix.items():
        with TempDir() as tmp:
            saved = _Example(matrix)
            saved.save(tmp.path())
            artifact = saved.info["artifact_path"]
            restored = _read_sparse_matrix_from_json(tmp.path(artifact), matrix_format)
            # Compare densified contents; format-specific internals may differ.
            np.testing.assert_array_equal(restored.toarray(), matrix.toarray())
def test_input_examples_with_nan(df_with_nan, dict_of_ndarrays_with_nans):
    """NaN values in DataFrame and tensor examples survive a save/load round trip."""
    # test setting example with data frame with NaN values in it
    sig = infer_signature(df_with_nan)
    with TempDir() as tmp:
        example = _Example(df_with_nan)
        example.save(tmp.path())
        filename = example.info["artifact_path"]
        with open(tmp.path(filename), "r") as f:
            data = json.load(f)
            # Saved examples use the pandas split orientation.
            assert set(data.keys()) == {"columns", "data"}
        parsed_df = dataframe_from_raw_json(tmp.path(filename), schema=sig.inputs)
        # by definition of NaN, NaN == NaN is False but NaN != NaN is True
        assert (
            ((df_with_nan == parsed_df) | ((df_with_nan != df_with_nan) & (parsed_df != parsed_df)))
            .all()
            .all()
        )
        # the frame read without schema should match except for the binary values
        no_schema_df = dataframe_from_raw_json(tmp.path(filename))
        a = parsed_df.drop(columns=["binary"])
        b = no_schema_df.drop(columns=["binary"])
        # NaN-tolerant equality, same trick as above.
        assert ((a == b) | ((a != a) & (b != b))).all().all()
    # pass multidimensional array
    for col in dict_of_ndarrays_with_nans:
        input_example = dict_of_ndarrays_with_nans[col]
        sig = infer_signature(input_example)
        with TempDir() as tmp:
            example = _Example(input_example)
            example.save(tmp.path())
            filename = example.info["artifact_path"]
            parsed_ary = _read_tensor_input_from_json(tmp.path(filename), schema=sig.inputs)
            assert np.array_equal(parsed_ary, input_example, equal_nan=True)
            # without a schema/dtype specified, the resulting tensor will keep the None type
            no_schema_df = _read_tensor_input_from_json(tmp.path(filename))
            np.testing.assert_array_equal(
                no_schema_df, np.where(np.isnan(input_example), None, input_example)
            )
| {
"content_hash": "7b949e72f7b773f32e1cc370dd1f19bc",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 100,
"avg_line_length": 39.186602870813395,
"alnum_prop": 0.5822954822954823,
"repo_name": "mlflow/mlflow",
"id": "103c9382225201e498e3b992cb14b30a16810501",
"size": "8190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/test_model_input_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
} |
"""
Response is a module to handle the response of the FOXSI telescopes
"""
from __future__ import absolute_import
import pandas as pd
import numpy as np
import warnings
import os
import matplotlib.pyplot as plt
import astropy.units as u
from scipy import interpolate
import pyfoxsi
import h5py
# Public API of this module.
__all__ = ['Response', 'Material']
class Response(object):
    """An object which provides the FOXSI telescope response.

    Parameters
    ----------
    shutter_state : int
        A number representing the state of the shutter
        (0 - no shutter, 1 - thin shutter, 2 - thick shutter)

    Examples
    --------
    >>> from pyfoxsi.response import Response
    >>> resp = Response()
    >>> resp1 = Response(shutter_state=1)
    """
    def __init__(self, shutter_state=0):
        # The effective-area table ships in <repo root>/data/, three directory
        # levels above the installed pyfoxsi package.
        path = os.path.dirname(pyfoxsi.__file__)
        for _ in range(3):
            path = os.path.dirname(path)
        path = os.path.join(path, 'data/')
        filename = 'effective_area_per_shell.csv'
        effarea_file = os.path.join(path, filename)
        self._eff_area_per_shell = pd.read_csv(effarea_file, index_col=0)
        # find what shells are missing
        shell_numbers = np.array(self._eff_area_per_shell.columns, np.uint)
        missing_shells = np.setdiff1d(shell_numbers, pyfoxsi.shell_ids)
        # remove the missing shells
        self.__number_of_telescopes = 1
        for missing_shell in missing_shells:
            # BUG FIX: use the `columns` keyword; the positional `axis`
            # argument to DataFrame.drop was removed in pandas 2.0.
            self._eff_area_per_shell.drop(columns=str(missing_shell), inplace=True)
        # now add the effective area of all of the shells together
        summed = self._eff_area_per_shell.sum(axis=1)
        self.optics_effective_area = pd.DataFrame({'module': summed, 'total': summed})
        self.effective_area = self.optics_effective_area.copy()
        self.number_of_telescopes = pyfoxsi.number_of_telescopes
        self._set_default_optical_path()
        if shutter_state > 0:
            self.__optical_path.append(Material('al', pyfoxsi.shutters_thickness[shutter_state]))
        self.__shutter_state = shutter_state
        self._add_optical_path_to_effective_area()

    def plot(self, axes=None):
        """Plot the effective area.

        Parameters
        ----------
        axes : `matplotlib.axes.Axes`, optional
            Axes to draw on; defaults to the current axes.
        """
        if axes is None:
            axes = plt.gca()
        # BUG FIX: DataFrame.plot takes the target axes as `ax`, not `axes`.
        self.effective_area.plot(ax=axes)
        axes.set_title(pyfoxsi.mission_title + ' ' + str(self.number_of_telescopes) + 'x ' + 'Shutter State ' + str(self.shutter_state))
        axes.set_ylabel('Effective area [cm$^2$]')
        axes.set_xlabel('Energy [keV]')

    def _set_default_optical_path(self):
        # The thermal blankets and the detector itself are always in the path.
        self.__optical_path = [Material('mylar', pyfoxsi.blanket_thickness),
                               Material(pyfoxsi.detector_material, pyfoxsi.detector_thickness)]

    @property
    def number_of_telescopes(self):
        """The total number of telescope modules"""
        return self.__number_of_telescopes

    @number_of_telescopes.setter
    def number_of_telescopes(self, x):
        # Rescale the summed 'total' column from the old module count to x.
        self.optics_effective_area['total'] = self.optics_effective_area['total'] / self.__number_of_telescopes * x
        self.__number_of_telescopes = x

    @property
    def optical_path(self):
        """The materials in the optical path including the detector"""
        return self.__optical_path

    @optical_path.setter
    def optical_path(self, x):
        # BUG FIX: assign the private attribute; the original assigned
        # `self.optical_path = x`, which re-entered this setter and
        # recursed until RecursionError.
        self.__optical_path = x
        self._add_optical_path_to_effective_area()

    @property
    def shutter_state(self):
        """The shutter state, allowed values are 0, 1, 2"""
        return self.__shutter_state

    @shutter_state.setter
    def shutter_state(self, x):
        # Shutter state is fixed at construction time.
        raise AttributeError('Cannot change shutter state. Create new object with desired shutter state')

    def _add_optical_path_to_effective_area(self):
        """Fold the response of every material in the optical path into the
        effective area (columns 'factor', 'total', and 'module')."""
        energies = np.array(self.optics_effective_area.index)
        factor = np.ones(energies.shape)
        # Apply all of the materials in the optical path to factor.
        # BUG FIX: multiply each material's response in exactly once; the
        # original `factor *= factor * ...` squared the accumulated factor
        # on every iteration. A stray debug print was also removed.
        for material in self.optical_path:
            if material.name == pyfoxsi.detector_material:
                # if it is the detector then we want the absorption
                factor *= material.absorption(energies)
            else:
                factor *= material.transmission(energies)
        self.effective_area['factor'] = factor
        self.effective_area['total'] = factor * self.optics_effective_area['total']
        self.effective_area['module'] = factor * self.optics_effective_area['module']
class Material(object):
    """An object which provides the optical properties of a material in x-rays.

    Parameters
    ----------
    material : str
        A string representing a material (e.g. cdte, be, mylar, si)
    thickness : `astropy.units.Quantity`
        The thickness of the material in the optical path.

    Examples
    --------
    >>> from pyfoxsi.response import Material
    >>> import astropy.units as u
    >>> detector = Material('cdte', 500 * u.um)
    >>> thermal_blankets = Material('mylar', 0.5 * u.mm)
    """
    def __init__(self, material, thickness):
        self.name = material
        self.thickness = thickness
        # The mass-attenuation table ships in <repo root>/data/, three
        # directory levels above the installed pyfoxsi package.
        path = os.path.dirname(pyfoxsi.__file__)
        for _ in range(3):
            path = os.path.dirname(path)
        path = os.path.join(path, 'data/')
        filename = 'mass_attenuation_coefficient.hdf5'
        data_file = os.path.join(path, filename)
        # BUG FIX: the HDF5 file was opened and never closed (handle leak).
        # Copy the dataset and the attributes we need into memory so they
        # remain usable after the file is closed.
        with h5py.File(data_file, 'r') as h:
            dataset = h[self.name]
            self._source_data = dataset[:]
            self.density = u.Quantity(dataset.attrs['density'], dataset.attrs['density unit'])
        # Interpolate log10(mass attenuation coefficient) against
        # log10(energy in keV). Outside the tabulated range the interpolant
        # returns 0 in log-space, i.e. a coefficient of 1.
        data_energy_kev = np.log10(self._source_data[0, :] * 1000)
        data_attenuation_coeff = np.log10(self._source_data[1, :])
        self._f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff, bounds_error=False, fill_value=0.0)
        self._mass_attenuation_coefficient_func = lambda x: 10 ** self._f(np.log10(x))

    def __repr__(self):
        """Returns a human-readable representation."""
        return '<Material ' + str(self.name) + ' ' + str(self.thickness) + '>'

    def transmission(self, energy):
        """Provide the transmission fraction (0 to 1).

        Parameters
        ----------
        energy : `astropy.units.Quantity`
            An array of energies in keV
        """
        coefficients = self._mass_attenuation_coefficient_func(energy) * u.cm ** 2 / u.gram
        # Beer-Lambert law: exp(-mu * rho * x).
        transmission = np.exp(- coefficients * self.density * self.thickness)
        return transmission

    def absorption(self, energy):
        """Provides the absorption fraction (0 to 1).

        Parameters
        ----------
        energy : `astropy.units.Quantity`
            An array of energies in keV.
        """
        return 1 - self.transmission(energy)

    def plot(self, axes=None):
        """Plot transmission and absorption from 1 to 59 keV."""
        if axes is None:
            axes = plt.gca()
        energies = np.arange(1, 60)
        axes.plot(energies, self.transmission(energies), label='Transmission')
        axes.plot(energies, self.absorption(energies), label='Absorption')
        axes.set_ylim(0, 1.2)
        axes.legend()
        axes.set_title(self.name + ' ' + str(self.thickness))
        axes.set_xlabel('Energy [keV]')
| {
"content_hash": "6c56d9c47ce6515db853217ece7da468",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 146,
"avg_line_length": 37.173469387755105,
"alnum_prop": 0.6231128191051332,
"repo_name": "samuel-badman/foxsi-smex",
"id": "c9c07e39ad9d5cfce81c6e5a6d9b0071d413e860",
"size": "7286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfoxsi/src/pyfoxsi/response/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7255"
},
{
"name": "IDL",
"bytes": "51828"
},
{
"name": "Makefile",
"bytes": "7422"
},
{
"name": "Prolog",
"bytes": "893"
},
{
"name": "Python",
"bytes": "27084"
}
],
"symlink_target": ""
} |
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
# Snippet of the "Change" link rendered for an inline row when
# show_change_link is enabled; tests assert its presence/absence.
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin:
    """Mixin that provisions a superuser for the admin view tests."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    """Rendering and form-handling behavior of admin inline formsets."""
    factory = RequestFactory()
    @classmethod
    def setUpTestData(cls):
        # One Holder with a single Inner row, shared by the change-view tests.
        super().setUpTestData()
        cls.holder = Holder.objects.create(dummy=13)
        Inner.objects.create(dummy=42, holder=cls.holder)
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(self.holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')
    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')
    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
    def test_inline_primary(self):
        """Posting the parent add form with an inline row saves both objects."""
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
    def test_tabular_inline_column_css_class(self):
        """
        Field names are included in the context to output a field-specific
        CSS class name in the column headers.
        """
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        text_field, call_me_field = list(response.context['inline_admin_formset'].fields())
        # Editable field.
        self.assertEqual(text_field['name'], 'text')
        self.assertContains(response, '<th class="column-text required">')
        # Read-only field.
        self.assertEqual(call_me_field['name'], 'call_me')
        self.assertContains(response, '<th class="column-call_me">')
    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should have
        its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="column-title1 required">Title1</th>', html=True)
    def test_custom_form_tabular_inline_extra_field_label(self):
        """An extra (non-model) field on a custom inline form exposes its
        label through the inline formset context."""
        response = self.client.get(reverse('admin:admin_inlines_outfititem_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')
    def test_non_editable_custom_form_tabular_inline_extra_field_label(self):
        """The extra form field label is exposed for the Chapter inline too."""
        response = self.client.get(reverse('admin:admin_inlines_chapter_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')
    def test_custom_form_tabular_inline_overridden_label(self):
        """
        SomeChildModelForm.__init__() overrides the label of a form field.
        That label is displayed in the TabularInline.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        field = list(response.context['inline_admin_formset'].fields())[0]
        self.assertEqual(field['label'], 'new label')
        self.assertContains(response, '<th class="column-name required">New label</th>', html=True)
    def test_tabular_non_field_errors(self):
        """
        non_field_errors are displayed correctly, including the correct value
        for colspan.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
        self.assertContains(
            response,
            '<tr class="row-form-errors"><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )
    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )
    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')
    def test_help_text(self):
        """
        The inlines' model field help texts are displayed when using both the
        stacked and tabular layouts.
        """
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome.">',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline">',
            1
        )
    def test_tabular_model_form_meta_readonly_field(self):
        """
        Tabular inlines use ModelForm.Meta.help_texts and labels for read-only
        fields.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text from ModelForm.Meta)" '
            'title="Help text from ModelForm.Meta">'
        )
        self.assertContains(response, 'Label from ModelForm.Meta')
    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        self.assertNotContains(response, '<td class="field-position">')
        self.assertInHTML(
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1">',
            response.rendered_content,
        )
    def test_non_related_name_inline(self):
        """
        Multiple inlines with related_name='+' have correct form prefixes.
        """
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100">',
            html=True
        )
        self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100">',
            html=True
        )
    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_localize_pk_shortcut(self):
        """
        The "View on Site" link is correct for locales that use thousand
        separators.
        """
        holder = Holder.objects.create(pk=123456789, dummy=42)
        inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
        inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
        self.assertContains(response, inner_shortcut)
    def test_custom_pk_shortcut(self):
        """
        The "View on Site" link is correct for models with a custom primary key
        field.
        """
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
        child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
        self.assertContains(response, child1_shortcut)
        self.assertContains(response, child2_shortcut)
    def test_create_inlines_on_inherited_model(self):
        """
        An object can be created with inlines when it inherits another class.
        """
        data = {
            'name': 'Martian',
            'sighting_set-TOTAL_FORMS': 1,
            'sighting_set-INITIAL_FORMS': 0,
            'sighting_set-MAX_NUM_FORMS': 0,
            'sighting_set-0-place': 'Zone 51',
            '_save': 'Save',
        }
        response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
    def test_custom_get_extra_form(self):
        """get_max_num() on the inline admin controls MAX_NUM_FORMS depending
        on whether the parent object already exists."""
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        # The maximum number of forms should respect 'get_max_num' on the
        # ModelAdmin
        max_forms_input = (
            '<input id="id_binarytree_set-MAX_NUM_FORMS" '
            'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">'
        )
        # The total number of forms will remain the same in either case
        total_forms_hidden = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">'
        )
        response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
        self.assertInHTML(max_forms_input % 3, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
        response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        self.assertInHTML(max_forms_input % 2, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
    def test_min_num(self):
        """
        min_num and extra determine number of forms.
        """
        class MinNumInline(TabularInline):
            model = BinaryTree
            min_num = 2
            extra = 3
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms, response.rendered_content)
        self.assertInHTML(total_forms, response.rendered_content)
    def test_custom_min_num(self):
        """get_min_num() on the inline determines MIN_NUM_FORMS (and hence
        TOTAL_FORMS) for both the add and the change view."""
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        class MinNumInline(TabularInline):
            model = BinaryTree
            extra = 3
            def get_min_num(self, request, obj=None, **kwargs):
                if obj:
                    return 5
                return 2
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms % 2, response.rendered_content)
        self.assertInHTML(total_forms % 5, response.rendered_content)
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
        self.assertInHTML(min_forms % 5, response.rendered_content)
        self.assertInHTML(total_forms % 8, response.rendered_content)
    def test_inline_nonauto_noneditable_pk(self):
        """Non-auto, non-editable primary keys render as hidden inputs."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-0-rand_pk" '
            'name="nonautopkbook_set-0-rand_pk" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-2-0-rand_pk" '
            'name="nonautopkbook_set-2-0-rand_pk" type="hidden">',
            html=True
        )
    def test_inline_nonauto_noneditable_inherited_pk(self):
        """An inherited, non-editable parent-link PK renders as a hidden input."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">',
            html=True
        )
    def test_inline_editable_pk(self):
        """Editable primary keys render as visible number inputs, once each."""
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
            'name="editablepkbook_set-0-manual_pk" type="number">',
            html=True, count=1
        )
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
            'name="editablepkbook_set-2-0-manual_pk" type="number">',
            html=True, count=1
        )
    def test_stacked_inline_edit_form_contains_has_original_class(self):
        """Stacked inline forms bound to existing rows get the has_original
        CSS class; the extra (empty) forms do not."""
        holder = Holder.objects.create(dummy=1)
        holder.inner_set.create(dummy=1)
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
        self.assertContains(
            response,
            '<div class="inline-related has_original" id="inner_set-0">',
            count=1
        )
        self.assertContains(
            response,
            '<div class="inline-related" id="inner_set-1">',
            count=1
        )
    def test_inlines_show_change_link_registered(self):
        "Inlines `show_change_link` for registered models when enabled."
        holder = Holder4.objects.create(dummy=1)
        item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
        item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
        items = (
            ('inner4stacked', item1.pk),
            ('inner4tabular', item2.pk),
        )
        response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        for model, pk in items:
            url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
            self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
    def test_inlines_show_change_link_unregistered(self):
        "Inlines `show_change_link` disabled for unregistered models."
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
    def test_tabular_inline_show_change_link_false_registered(self):
        "Inlines `show_change_link` disabled by default."
        poll = Poll.objects.create(name="New poll")
        Question.objects.create(poll=poll)
        response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
    def test_noneditable_inline_has_field_inputs(self):
        """Inlines without change permission shows field inputs on add form."""
        response = self.client.get(reverse('admin:admin_inlines_novelreadonlychapter_add'))
        self.assertContains(
            response,
            '<input type="text" name="chapter_set-0-name" '
            'class="vTextField" maxlength="40" id="id_chapter_set-0-name">',
            html=True
        )
    def test_inlines_singular_heading_one_to_one(self):
        """Inlines for one-to-one relations render a singular <h2> heading."""
        response = self.client.get(reverse('admin:admin_inlines_person_add'))
        self.assertContains(response, '<h2>Author</h2>', html=True)  # Tabular.
        self.assertContains(response, '<h2>Fashionista</h2>', html=True)  # Stacked.
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):
    """Media (JS) declared on parent admins and on inlines reaches the page."""
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_inline_media_only_base(self):
        """Media declared only on the parent ModelAdmin is rendered."""
        parent = Holder.objects.create(dummy=13)
        Inner.objects.create(dummy=42, holder=parent)
        url = reverse('admin:admin_inlines_holder_change', args=(parent.id,))
        page = self.client.get(url)
        self.assertContains(page, 'my_awesome_admin_scripts.js')
    def test_inline_media_only_inline(self):
        """Media declared only on the inline is collected in the right order."""
        parent = Holder3.objects.create(dummy=13)
        Inner3.objects.create(dummy=42, holder=parent)
        url = reverse('admin:admin_inlines_holder3_change', args=(parent.id,))
        page = self.client.get(url)
        expected_js = [
            'admin/js/vendor/jquery/jquery.min.js',
            'my_awesome_inline_scripts.js',
            'custom_number.js',
            'admin/js/jquery.init.js',
            'admin/js/inlines.js',
        ]
        self.assertEqual(page.context['inline_admin_formsets'][0].media._js, expected_js)
        self.assertContains(page, 'my_awesome_inline_scripts.js')
    def test_all_inline_media(self):
        """Media from both the parent admin and the inline is rendered."""
        parent = Holder2.objects.create(dummy=13)
        Inner2.objects.create(dummy=42, holder=parent)
        url = reverse('admin:admin_inlines_holder2_change', args=(parent.id,))
        page = self.client.get(url)
        self.assertContains(page, 'my_awesome_admin_scripts.js')
        self.assertContains(page, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):

    def test_immutable_content_type(self):
        """Regression for #9362

        Only InlineAdminForm and its "original" argument matter here, so the
        remaining constructor arguments can safely be None/{}. Building the
        inline form must not alter the content_type of the Child instance.
        """
        teacher = Teacher.objects.create(name='Sally')
        parent = Parent.objects.create(name='John')
        child = Child.objects.create(name='Joe', teacher=teacher, parent=parent)
        inline_form = InlineAdminForm(None, None, {}, {}, child)
        expected_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(inline_form.original.content_type, expected_ct)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
    """Deleting an inline whose related objects are PROTECTed must fail
    validation instead of cascading."""

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        novel = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=novel, name='Many Meetings')
        foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
        change_url = reverse('admin:admin_inlines_novel_change', args=(novel.id,))
        response = self.client.get(change_url)
        post_data = {
            'name': novel.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': novel.id,
            'chapter_set-0-DELETE': 'on',
        }
        response = self.client.post(change_url, post_data)
        # The save is rejected and the protection error is shown inline.
        self.assertContains(response, "Deleting chapter %s would require deleting "
                            "the following protected related objects: foot note %s"
                            % (chapter, foot_note))
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = User(username='admin', is_staff=True, is_active=True)
        cls.user.set_password('secret')
        cls.user.save()
        cls.author_ct = ContentType.objects.get_for_model(Author)
        cls.holder_ct = ContentType.objects.get_for_model(Holder2)
        cls.book_ct = ContentType.objects.get_for_model(Book)
        cls.inner_ct = ContentType.objects.get_for_model(Inner2)
        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)
        author = Author.objects.create(pk=1, name='The Author')
        cls.book = author.books.create(name='The inline Book')
        cls.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=cls.book)
        cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
        cls.holder = Holder2.objects.create(dummy=13)
        cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder)

    def setUp(self):
        # holder_change_url depends on per-class test data, so build it here.
        self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(self.holder.id,))
        self.client.force_login(self.user)

    def test_inline_add_m2m_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_view_only_perm(self):
        permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # View-only inlines. (It could be nicer to hide the empty, non-editable
        # inlines on the add page.)
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(
            response,
            '<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" '
            'id="id_Author_books-TOTAL_FORMS">',
            html=True,
        )
        self.assertNotContains(response, 'Add another Author-Book Relationship')

    def test_inline_add_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="3" name="inner2_set-TOTAL_FORMS">', html=True)

    def test_inline_change_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_view_only_perm(self):
        permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # View-only inlines.
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(
            response,
            '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
            'id="id_Author_books-TOTAL_FORMS">',
            html=True,
        )
        # The field in the inline is read-only.
        self.assertContains(response, '<p>%s</p>' % self.book)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="Author_books-0-DELETE" id="id_Author_books-0-DELETE">',
            html=True,
        )

    def test_inline_change_m2m_change_perm(self):
        permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, True)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-book relationship')
        # One existing relationship plus three extra forms.
        self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
                            'value="4" name="Author_books-TOTAL_FORMS">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-0-id" value="%i" '
            'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id,
            html=True
        )
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
            'name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertNotContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )

    def test_inline_change_fk_change_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>', count=2)
        # Just the one form for existing instances
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        # max-num 0 means we can't add new ones
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS">',
            html=True
        )
        # TabularInline
        self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
        self.assertContains(
            response,
            '<input type="number" name="inner2_set-2-0-dummy" value="%s" '
            'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
            html=True,
        )

    def test_inline_change_fk_add_change_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )

    def test_inline_change_fk_change_del_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>', count=2)
        # One form for existing instance only, three for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
        # TabularInline
        self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
        self.assertContains(
            response,
            '<input type="number" name="inner2_set-2-0-dummy" value="%s" '
            'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
            html=True,
        )
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestReadOnlyChangeViewInlinePermissions(TestCase):
    """With view-only permission on the parent model, the change view is
    rendered entirely read-only (no inputs, no delete/save controls)."""

    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create_user('testing', password='password', is_staff=True)
        # View-only access to Poll; all permissions on the Question inline.
        cls.user.user_permissions.add(
            Permission.objects.get(codename='view_poll', content_type=ContentType.objects.get_for_model(Poll))
        )
        cls.user.user_permissions.add(
            *Permission.objects.filter(
                codename__endswith="question", content_type=ContentType.objects.get_for_model(Question)
            ).values_list('pk', flat=True)
        )
        cls.poll = Poll.objects.create(name="Survey")
        cls.add_url = reverse('admin:admin_inlines_poll_add')
        cls.change_url = reverse('admin:admin_inlines_poll_change', args=(cls.poll.id,))

    def setUp(self):
        self.client.force_login(self.user)

    def test_add_url_not_allowed(self):
        # Neither GET nor POST may reach the add view.
        response = self.client.get(self.add_url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(self.add_url, {})
        self.assertEqual(response.status_code, 403)

    def test_post_to_change_url_not_allowed(self):
        response = self.client.post(self.change_url, {})
        self.assertEqual(response.status_code, 403)

    def test_get_to_change_url_is_allowed(self):
        response = self.client.get(self.change_url)
        self.assertEqual(response.status_code, 200)

    def test_main_model_is_rendered_as_read_only(self):
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<div class="readonly">%s</div>' % self.poll.name,
            html=True
        )
        # Renamed from `input` to avoid shadowing the builtin.
        name_input = '<input type="text" name="name" value="%s" class="vTextField" maxlength="40" required id="id_name">'
        self.assertNotContains(
            response,
            name_input % self.poll.name,
            html=True
        )

    def test_inlines_are_rendered_as_read_only(self):
        question = Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<td class="field-text"><p>%s</p></td>' % question.text,
            html=True
        )
        self.assertNotContains(response, 'id="id_question_set-0-text"')
        self.assertNotContains(response, 'id="id_related_objs-0-DELETE"')

    def test_submit_line_shows_only_close_button(self):
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<a href="/admin/admin_inlines/poll/" class="closelink">Close</a>',
            html=True
        )
        delete_link = '<p class="deletelink-box"><a href="/admin/admin_inlines/poll/%s/delete/" class="deletelink">Delete</a></p>'  # noqa
        self.assertNotContains(
            response,
            delete_link % self.poll.id,
            html=True
        )
        self.assertNotContains(response, '<input type="submit" value="Save and add another" name="_addanother">')
        self.assertNotContains(response, '<input type="submit" value="Save and continue editing" name="_continue">')

    def test_inline_delete_buttons_are_not_shown(self):
        Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="question_set-0-DELETE" id="id_question_set-0-DELETE">',
            html=True
        )

    def test_extra_inlines_are_not_shown(self):
        response = self.client.get(self.change_url)
        self.assertNotContains(response, 'id="id_question_set-0-text"')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps
def setUp(self):
    # A fresh superuser for each test; individual tests do the login
    # themselves via self.admin_login().
    User.objects.create_superuser(username='super', password='secret', email='super@example.com')
def test_add_stackeds(self):
    """
    The "Add another XXX" link correctly adds items to the stacked formset.
    """
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
    row_selector = '#inner4stacked_set-group .dynamic-inner4stacked_set'

    def count_rows():
        return len(self.selenium.find_elements_by_css_selector(row_selector))

    # Three extra forms to start with.
    self.assertEqual(count_rows(), 3)
    self.selenium.find_element_by_link_text('Add another Inner4 stacked').click()
    self.assertEqual(count_rows(), 4)
def test_delete_stackeds(self):
    """Every dynamically added stacked row can be removed again."""
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
    group = '#inner4stacked_set-group'

    def count_rows():
        return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % group))

    self.assertEqual(count_rows(), 3)
    add_link = self.selenium.find_element_by_link_text('Add another Inner4 stacked')
    add_link.click()
    add_link.click()
    self.assertEqual(count_rows(), 5, msg="sanity check")
    # Remove every row via its inline delete link.
    for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % group):
        delete_link.click()
    with self.disable_implicit_wait():
        self.assertEqual(count_rows(), 0)
def test_delete_invalid_stacked_inlines(self):
    """Deleting the stacked row that triggers a duplicate-value validation
    error removes the error and allows the form to save."""
    from selenium.common.exceptions import NoSuchElementException
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
    inline_id = '#inner4stacked_set-group'

    def rows_length():
        return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))

    self.assertEqual(rows_length(), 3)
    add_button = self.selenium.find_element_by_link_text(
        'Add another Inner4 stacked')
    add_button.click()
    add_button.click()
    self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4stacked_set-4-dummy')), 1)
    # Enter some data and click 'Save'.
    # Rows 2 and 4 deliberately share the value 222 to trigger the
    # formset's duplicate-value validation error.
    self.selenium.find_element_by_name('dummy').send_keys('1')
    self.selenium.find_element_by_name('inner4stacked_set-0-dummy').send_keys('100')
    self.selenium.find_element_by_name('inner4stacked_set-1-dummy').send_keys('101')
    self.selenium.find_element_by_name('inner4stacked_set-2-dummy').send_keys('222')
    self.selenium.find_element_by_name('inner4stacked_set-3-dummy').send_keys('103')
    self.selenium.find_element_by_name('inner4stacked_set-4-dummy').send_keys('222')
    with self.wait_page_loaded():
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
    self.assertEqual(rows_length(), 5, msg="sanity check")
    errorlist = self.selenium.find_element_by_css_selector(
        '%s .dynamic-inner4stacked_set .errorlist li' % inline_id
    )
    self.assertEqual('Please correct the duplicate values below.', errorlist.text)
    # Deleting one of the duplicate rows clears the error client-side.
    delete_link = self.selenium.find_element_by_css_selector('#inner4stacked_set-4 .inline-deletelink')
    delete_link.click()
    self.assertEqual(rows_length(), 4)
    with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
        self.selenium.find_element_by_css_selector('%s .dynamic-inner4stacked_set .errorlist li' % inline_id)
    with self.wait_page_loaded():
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
    # The objects have been created in the database.
    self.assertEqual(Inner4Stacked.objects.all().count(), 4)
def test_delete_invalid_tabular_inlines(self):
    """Tabular counterpart of test_delete_invalid_stacked_inlines: removing
    the duplicate row clears the validation error and the form saves."""
    from selenium.common.exceptions import NoSuchElementException
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))
    inline_id = '#inner4tabular_set-group'

    def rows_length():
        return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4tabular_set' % inline_id))

    self.assertEqual(rows_length(), 3)
    add_button = self.selenium.find_element_by_link_text(
        'Add another Inner4 tabular')
    add_button.click()
    add_button.click()
    self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4tabular_set-4-dummy')), 1)
    # Enter some data and click 'Save'.
    # Rows 2 and 4 share the value 222 to trigger the duplicate error.
    self.selenium.find_element_by_name('dummy').send_keys('1')
    self.selenium.find_element_by_name('inner4tabular_set-0-dummy').send_keys('100')
    self.selenium.find_element_by_name('inner4tabular_set-1-dummy').send_keys('101')
    self.selenium.find_element_by_name('inner4tabular_set-2-dummy').send_keys('222')
    self.selenium.find_element_by_name('inner4tabular_set-3-dummy').send_keys('103')
    self.selenium.find_element_by_name('inner4tabular_set-4-dummy').send_keys('222')
    with self.wait_page_loaded():
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
    self.assertEqual(rows_length(), 5, msg="sanity check")
    # Non-field errorlist is in its own <tr> just before
    # tr#inner4tabular_set-3:
    errorlist = self.selenium.find_element_by_css_selector(
        '%s #inner4tabular_set-3 + .row-form-errors .errorlist li' % inline_id
    )
    self.assertEqual('Please correct the duplicate values below.', errorlist.text)
    delete_link = self.selenium.find_element_by_css_selector('#inner4tabular_set-4 .inline-deletelink')
    delete_link.click()
    self.assertEqual(rows_length(), 4)
    with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
        self.selenium.find_element_by_css_selector('%s .dynamic-inner4tabular_set .errorlist li' % inline_id)
    with self.wait_page_loaded():
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
    # The objects have been created in the database.
    self.assertEqual(Inner4Tabular.objects.all().count(), 4)
def test_add_inlines(self):
    """
    The "Add another XXX" link correctly adds items to the inline form.
    """
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
    # There's only one inline to start with and it has the correct ID.
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set')), 1)
    self.assertEqual(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set')[0].get_attribute('id'),
        'profile_set-0')
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
    # Add an inline
    self.selenium.find_element_by_link_text('Add another Profile').click()
    # The inline has been added, it has the right id, and it contains the
    # correct fields.
    self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
    self.assertEqual(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
    # Let's add another one to be sure
    self.selenium.find_element_by_link_text('Add another Profile').click()
    self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
    self.assertEqual(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
    # Enter some data and click 'Save'
    self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
    self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
    self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
    self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
    self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
    self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
    with self.wait_page_loaded():
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
    # The objects have been created in the database
    self.assertEqual(ProfileCollection.objects.all().count(), 1)
    self.assertEqual(Profile.objects.all().count(), 3)
def test_add_inline_link_absent_for_view_only_parent_model(self):
    """No "Add another" inline link when the parent model is view-only."""
    from selenium.common.exceptions import NoSuchElementException
    viewer = User.objects.create_user('testing', password='password', is_staff=True)
    # View-only on Poll, full permissions on the Question inline.
    poll_ct = ContentType.objects.get_for_model(Poll)
    viewer.user_permissions.add(
        Permission.objects.get(codename='view_poll', content_type=poll_ct)
    )
    question_ct = ContentType.objects.get_for_model(Question)
    viewer.user_permissions.add(
        *Permission.objects.filter(
            codename__endswith="question", content_type=question_ct
        ).values_list('pk', flat=True)
    )
    self.admin_login(username='testing', password='password')
    poll = Poll.objects.create(name="Survey")
    change_url = reverse('admin:admin_inlines_poll_change', args=(poll.id,))
    self.selenium.get(self.live_server_url + change_url)
    with self.disable_implicit_wait():
        with self.assertRaises(NoSuchElementException):
            self.selenium.find_element_by_link_text('Add another Question')
def test_delete_inlines(self):
    """Deleted inline rows disappear and the remaining IDs re-sequence."""
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))
    # Add a few inlines
    for _ in range(4):
        self.selenium.find_element_by_link_text('Add another Profile').click()
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '#profile_set-group table tr.dynamic-profile_set')), 5)
    for index in range(5):
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-%d' % index)), 1)
    # Click on a few delete buttons
    for index in (1, 2):
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-%d td.delete a' % index).click()
    # The rows are gone and the IDs have been re-sequenced
    self.assertEqual(len(self.selenium.find_elements_by_css_selector(
        '#profile_set-group table tr.dynamic-profile_set')), 3)
    for index in range(3):
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-%d' % index)), 1)
def test_collapsed_inlines(self):
    """Collapsed inlines expose working SHOW/HIDE toggle links."""
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
    # One field is in a stacked inline, other in a tabular one.
    test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
    show_links = self.selenium.find_elements_by_link_text('SHOW')
    self.assertEqual(len(show_links), 3)
    for link, field_name in zip(show_links, test_fields):
        self.wait_until_invisible(field_name)
        link.click()
        self.wait_until_visible(field_name)
    hide_links = self.selenium.find_elements_by_link_text('HIDE')
    self.assertEqual(len(hide_links), 2)
    for link, field_name in zip(hide_links, test_fields):
        self.wait_until_visible(field_name)
        link.click()
        self.wait_until_invisible(field_name)
def test_added_stacked_inline_with_collapsed_fields(self):
    """Dynamically added stacked rows also get working SHOW/HIDE links."""
    self.admin_login(username='super', password='secret')
    self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_teacher_add'))
    self.selenium.find_element_by_link_text('Add another Child').click()
    test_fields = ['#id_child_set-0-name', '#id_child_set-1-name']
    show_links = self.selenium.find_elements_by_link_text('SHOW')
    self.assertEqual(len(show_links), 2)
    for link, field_name in zip(show_links, test_fields):
        self.wait_until_invisible(field_name)
        link.click()
        self.wait_until_visible(field_name)
    hide_links = self.selenium.find_elements_by_link_text('HIDE')
    self.assertEqual(len(hide_links), 2)
    for link, field_name in zip(hide_links, test_fields):
        self.wait_until_visible(field_name)
        link.click()
        self.wait_until_invisible(field_name)
def assertBorder(self, element, border):
width, style, color = border.split(' ')
border_properties = [
'border-bottom-%s',
'border-left-%s',
'border-right-%s',
'border-top-%s',
]
for prop in border_properties:
prop = prop % 'width'
self.assertEqual(element.value_of_css_property(prop), width)
for prop in border_properties:
prop = prop % 'style'
self.assertEqual(element.value_of_css_property(prop), style)
# Convert hex color to rgb.
self.assertRegex(color, '#[0-9a-f]{6}')
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
# The value may be expressed as either rgb() or rgba() depending on the
# browser.
colors = [
'rgb(%d, %d, %d)' % (r, g, b),
'rgba(%d, %d, %d, 1)' % (r, g, b),
]
for prop in border_properties:
prop = prop % 'color'
self.assertIn(element.value_of_css_property(prop), colors)
    def test_inline_formset_error_input_border(self):
        """Inputs inside inlines with validation errors get a red error border."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        self.wait_until_visible('#id_dummy')
        self.selenium.find_element_by_id('id_dummy').send_keys(1)
        # Expand both collapsed inlines and fill their dummy fields so the
        # form submits with errors on the *other* (name/select/text) fields.
        fields = ['id_inner5stacked_set-0-dummy', 'id_inner5tabular_set-0-dummy']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        for show_index, field_name in enumerate(fields):
            show_links[show_index].click()
            self.wait_until_visible('#' + field_name)
            self.selenium.find_element_by_id(field_name).send_keys(1)
        # Before save all inputs have default border
        for inline in ('stacked', 'tabular'):
            for field_name in ('name', 'select', 'text'):
                element_id = 'id_inner5%s_set-0-%s' % (inline, field_name)
                self.assertBorder(
                    self.selenium.find_element_by_id(element_id),
                    '1px solid #cccccc',
                )
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # Test the red border around inputs by css selectors
        stacked_selectors = ['.errors input', '.errors select', '.errors textarea']
        for selector in stacked_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )
        # Tabular inlines render errors in a <ul> preceding the widget.
        tabular_selectors = [
            'td ul.errorlist + input', 'td ul.errorlist + select', 'td ul.errorlist + textarea'
        ]
        for selector in tabular_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )
    def test_inline_formset_error(self):
        """Inlines with validation errors are rendered expanded after save."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        stacked_inline_formset_selector = 'div#inner5stacked_set-group fieldset.module.collapse'
        tabular_inline_formset_selector = 'div#inner5tabular_set-group fieldset.module.collapse'
        # Inlines without errors, both inlines collapsed
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 1
        )
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        # Inlines with errors, both inlines expanded
        test_fields = ['#id_inner5stacked_set-0-dummy', '#id_inner5tabular_set-0-dummy']
        for show_index, field_name in enumerate(test_fields):
            show_links[show_index].click()
            self.wait_until_visible(field_name)
            self.selenium.find_element_by_id(field_name[1:]).send_keys(1)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        # Re-collapse both inlines before saving; scroll each HIDE link into
        # view first so the click isn't intercepted by fixed page chrome.
        for hide_index, field_name in enumerate(test_fields):
            hide_link = hide_links[hide_index]
            self.selenium.execute_script('window.scrollTo(0, %s);' % hide_link.location['y'])
            hide_link.click()
            self.wait_until_invisible(field_name)
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # After an invalid save, neither inline may remain collapsed.
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 0
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 0
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector)), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector)), 1
        )
| {
"content_hash": "a348afbb800ae45ed1de219af43381fd",
"timestamp": "",
"source": "github",
"line_count": 1353,
"max_line_length": 138,
"avg_line_length": 48.886917960088695,
"alnum_prop": 0.639649854862119,
"repo_name": "koordinates/django",
"id": "8728a9bc098eecd0ccf4dffbaf9dfa9f62c33a1f",
"size": "66144",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable/3.2.x-kx",
"path": "tests/admin_inlines/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84917"
},
{
"name": "HTML",
"bytes": "223820"
},
{
"name": "JavaScript",
"bytes": "139791"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "14472067"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""Tests for Plex server."""
from unittest.mock import patch
from plexapi.exceptions import BadRequest, NotFound
import pytest
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as MEDIA_PLAYER_DOMAIN,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_VIDEO,
SERVICE_PLAY_MEDIA,
)
from homeassistant.components.plex.const import DOMAIN
from homeassistant.components.plex.errors import MediaNotFound
from homeassistant.const import ATTR_ENTITY_ID
async def test_media_lookups(hass, mock_plex_server, requests_mock, playqueue_created):
    """Exercise media_player.play_media lookups against a mocked Plex server.

    Covers direct key lookups, TV show/season/episode searches, music
    artist/album/track searches, movie searches, and playlist lookups,
    including the error paths that surface as MediaNotFound.
    """
    # Plex Key searches
    media_player_id = hass.states.async_entity_ids("media_player")[0]
    requests_mock.post("/playqueues", text=playqueue_created)
    requests_mock.get("/player/playback/playMedia", status_code=200)
    # A bare Plex key plays directly without any library search.
    assert await hass.services.async_call(
        MEDIA_PLAYER_DOMAIN,
        SERVICE_PLAY_MEDIA,
        {
            ATTR_ENTITY_ID: media_player_id,
            ATTR_MEDIA_CONTENT_TYPE: DOMAIN,
            ATTR_MEDIA_CONTENT_ID: 1,
        },
        True,
    )
    # An unknown key raises MediaNotFound.
    with pytest.raises(MediaNotFound) as excinfo:
        with patch("plexapi.server.PlexServer.fetchItem", side_effect=NotFound):
            assert await hass.services.async_call(
                MEDIA_PLAYER_DOMAIN,
                SERVICE_PLAY_MEDIA,
                {
                    ATTR_ENTITY_ID: media_player_id,
                    ATTR_MEDIA_CONTENT_TYPE: DOMAIN,
                    ATTR_MEDIA_CONTENT_ID: 123,
                },
                True,
            )
    assert "Media for key 123 not found" in str(excinfo.value)
    # TV show searches
    with pytest.raises(MediaNotFound) as excinfo:
        payload = '{"library_name": "Not a Library", "show_name": "TV Show"}'
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,
                ATTR_MEDIA_CONTENT_ID: payload,
            },
            True,
        )
    assert "Library 'Not a Library' not found in" in str(excinfo.value)
    # Patch the library search so each call's exact kwargs can be verified.
    with patch("plexapi.library.LibrarySection.search") as search:
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show"}',
            },
            True,
        )
        search.assert_called_with(**{"show.title": "TV Show", "libtype": "show"})
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "episode_name": "An Episode"}',
            },
            True,
        )
        search.assert_called_with(
            **{"episode.title": "An Episode", "libtype": "episode"}
        )
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show", "season_number": 1}',
            },
            True,
        )
        search.assert_called_with(
            **{"show.title": "TV Show", "season.index": 1, "libtype": "season"}
        )
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show", "season_number": 1, "episode_number": 3}',
            },
            True,
        )
        search.assert_called_with(
            **{
                "show.title": "TV Show",
                "season.index": 1,
                "episode.index": 3,
                "libtype": "episode",
            }
        )
        # Music searches
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist"}',
            },
            True,
        )
        search.assert_called_with(**{"artist.title": "Artist", "libtype": "artist"})
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "album_name": "Album"}',
            },
            True,
        )
        search.assert_called_with(**{"album.title": "Album", "libtype": "album"})
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "track_name": "Track 3"}',
            },
            True,
        )
        search.assert_called_with(
            **{"artist.title": "Artist", "track.title": "Track 3", "libtype": "track"}
        )
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album"}',
            },
            True,
        )
        search.assert_called_with(
            **{"artist.title": "Artist", "album.title": "Album", "libtype": "album"}
        )
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album", "track_number": 3}',
            },
            True,
        )
        search.assert_called_with(
            **{
                "artist.title": "Artist",
                "album.title": "Album",
                "track.index": 3,
                "libtype": "track",
            }
        )
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album", "track_name": "Track 3"}',
            },
            True,
        )
        search.assert_called_with(
            **{
                "artist.title": "Artist",
                "album.title": "Album",
                "track.title": "Track 3",
                "libtype": "track",
            }
        )
        # Movie searches
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_VIDEO,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Movies", "video_name": "Movie 1"}',
            },
            True,
        )
        search.assert_called_with(**{"movie.title": "Movie 1", "libtype": None})
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,
                ATTR_MEDIA_CONTENT_ID: '{"library_name": "Movies", "title": "Movie 1"}',
            },
            True,
        )
        search.assert_called_with(**{"title": "Movie 1", "libtype": None})
        # A search payload without a library_name is rejected.
        with pytest.raises(MediaNotFound) as excinfo:
            payload = '{"title": "Movie 1"}'
            assert await hass.services.async_call(
                MEDIA_PLAYER_DOMAIN,
                SERVICE_PLAY_MEDIA,
                {
                    ATTR_ENTITY_ID: media_player_id,
                    ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_VIDEO,
                    ATTR_MEDIA_CONTENT_ID: payload,
                },
                True,
            )
        assert "Must specify 'library_name' for this search" in str(excinfo.value)
        # A BadRequest from plexapi is translated into MediaNotFound.
        with pytest.raises(MediaNotFound) as excinfo:
            payload = '{"library_name": "Movies", "title": "Not a Movie"}'
            with patch("plexapi.library.LibrarySection.search", side_effect=BadRequest):
                assert await hass.services.async_call(
                    MEDIA_PLAYER_DOMAIN,
                    SERVICE_PLAY_MEDIA,
                    {
                        ATTR_ENTITY_ID: media_player_id,
                        ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_VIDEO,
                        ATTR_MEDIA_CONTENT_ID: payload,
                    },
                    True,
                )
        assert "Problem in query" in str(excinfo.value)
    # Playlist searches
    assert await hass.services.async_call(
        MEDIA_PLAYER_DOMAIN,
        SERVICE_PLAY_MEDIA,
        {
            ATTR_ENTITY_ID: media_player_id,
            ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST,
            ATTR_MEDIA_CONTENT_ID: '{"playlist_name": "Playlist 1"}',
        },
        True,
    )
    with pytest.raises(MediaNotFound) as excinfo:
        payload = '{"playlist_name": "Not a Playlist"}'
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST,
                ATTR_MEDIA_CONTENT_ID: payload,
            },
            True,
        )
    assert "Playlist 'Not a Playlist' not found" in str(excinfo.value)
    with pytest.raises(MediaNotFound) as excinfo:
        payload = "{}"
        assert await hass.services.async_call(
            MEDIA_PLAYER_DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: media_player_id,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST,
                ATTR_MEDIA_CONTENT_ID: payload,
            },
            True,
        )
    assert "Must specify 'playlist_name' for this search" in str(excinfo.value)
| {
"content_hash": "48a7b3a1d76e576380698ac911c8f13e",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 140,
"avg_line_length": 35.72025723472669,
"alnum_prop": 0.5229993698802773,
"repo_name": "toddeye/home-assistant",
"id": "e5c19d31c4ed821a90a63c0a346b089b0fab5e4f",
"size": "11109",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/plex/test_media_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import pytest
from pycvs.cli import PyCvs
# Imports for mocking
import os.path
import shutil
import sys
def get_class(mocker):
    """Build a PyCvs instance with its __init__ stubbed to a no-op."""
    mocker.patch.object(PyCvs, '__init__').return_value = None
    return PyCvs()
def test_no_parameter(mocker):
    """Running bare `pycvs` prints that there is nothing to do."""
    cli = get_class(mocker)
    mocked_print = mocker.patch('builtins.print')
    mocker.patch.object(sys, 'argv', ['pycvs'])
    cli.process()
    mocked_print.assert_called_once_with("Nothing to do")
def test_checkout_no_parameter(mocker):
    """`pycvs checkout` without arguments reports the missing arguments."""
    cli = get_class(mocker)
    mocked_print = mocker.patch('builtins.print')
    mocker.patch.object(sys, 'argv', ['pycvs', 'checkout'])
    cli.process()
    mocked_print.assert_called_once_with("Missing arguments for checkout command")
def test_co_no_parameter(mocker):
    """The `co` alias without arguments reports the missing arguments."""
    cli = get_class(mocker)
    mocked_print = mocker.patch('builtins.print')
    mocker.patch.object(sys, 'argv', ['pycvs', 'co'])
    cli.process()
    mocked_print.assert_called_once_with("Missing arguments for co command")
def test_checkout_parameters(mocker):
    """'pycvs co <args>' forwards the argument list to PyCvs._checkout."""
    obj = get_class(mocker)
    mocker.patch.object(sys, 'argv', ['pycvs', 'co', 'module/', 'to', 'test'])
    check = mocker.patch.object(PyCvs, '_checkout')
    obj.process()
    # NOTE(review): the trailing "| {" below looks like extraction residue
    # fused onto this line — confirm against the upstream file.
    check.assert_called_once_with(['module/', 'to', 'test']) | {
"content_hash": "ac0d00791d51456eb9c5da99ef287b9f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 25.3265306122449,
"alnum_prop": 0.6639806607574537,
"repo_name": "gerson23/pycvs",
"id": "a3a27136fd2b1e9d159cd42f409a4246825036b5",
"size": "1241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28964"
}
],
"symlink_target": ""
} |
from ui.accounts import AccountsForm | {
"content_hash": "85c9a84b543790882ea19452c1cffcc4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 36,
"alnum_prop": 0.8888888888888888,
"repo_name": "baverman/cakeplant",
"id": "47df64e17e70c6ecc4618e818cddc17e54b78557",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cakeplant/accounts/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "51366"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
from tinctest import logger
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
import os
import re
import socket
import time
import shutil
import sys
import signal
class aoreadmemory(MPPTestCase):
    """Regression test: reading a compressed append-only table while malloc
    failures are injected must raise a fault error without taking down the
    primary segments."""

    def _run_fault_injector(self, action):
        # Setup and teardown run the same gpfaultinjector invocation,
        # differing only in the -y action ('error' to arm, 'reset' to clear).
        gpfaultinjector = Command('fault injector',
                                  'source $GPHOME/greenplum_path.sh; '
                                  'gpfaultinjector -f malloc_failure '
                                  '-y %s -H ALL -r primary' % action)
        gpfaultinjector.run()

    def tearDown(self):
        # Always clear the fault so subsequent tests see healthy primaries.
        self._run_fault_injector('reset')

    def test_ao_malloc_failure(self):
        """
        @product_version gpdb: [4.3.5.1 -]
        """
        # Build a small compressed AO table to read back.
        PSQL.run_sql_command('DROP table if exists ao_read_malloc')
        PSQL.run_sql_command('create table ao_read_malloc (a int) with (appendonly=true, compresstype=quicklz)')
        PSQL.run_sql_command('insert into ao_read_malloc '
                             'select * from generate_series(1, 1000)')
        # Arm the malloc-failure fault on all primaries, then read the table.
        self._run_fault_injector('error')
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        PSQL.run_sql_command(sql_cmd='select count(*) from ao_read_malloc', results=res)
        logger.info(res)
        # The read must fail with the injected fault, but the segments must
        # stay reachable (no temporary-connection error).
        self.assertIn("ERROR: fault triggered", res['stderr'])
        self.assertNotIn("ERROR: could not temporarily connect to one or more segments", res['stderr'])
        logger.info('Pass')
| {
"content_hash": "bb493b0e9331fe8d7f28aefdd7d0a30a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 112,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.6502877379371403,
"repo_name": "edespino/gpdb",
"id": "7b4c616c92e2d42c2f144aff7464ca11fe71d6d1",
"size": "2282",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/access_methods/ao_memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3737"
},
{
"name": "Batchfile",
"bytes": "11369"
},
{
"name": "C",
"bytes": "36580146"
},
{
"name": "C++",
"bytes": "3396346"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "164"
},
{
"name": "DTrace",
"bytes": "3746"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "740582"
},
{
"name": "HTML",
"bytes": "354931"
},
{
"name": "Java",
"bytes": "186576"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "195794"
},
{
"name": "M4",
"bytes": "97709"
},
{
"name": "Makefile",
"bytes": "440584"
},
{
"name": "Objective-C",
"bytes": "42255"
},
{
"name": "PLSQL",
"bytes": "218116"
},
{
"name": "PLpgSQL",
"bytes": "5424886"
},
{
"name": "Perl",
"bytes": "3911633"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "8130606"
},
{
"name": "Roff",
"bytes": "39530"
},
{
"name": "Ruby",
"bytes": "26862"
},
{
"name": "SQLPL",
"bytes": "3939815"
},
{
"name": "Shell",
"bytes": "571615"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "519516"
}
],
"symlink_target": ""
} |
from typing import Any, Generator
class Task(object):
    """Wrap a coroutine (generator) so a scheduler can resume it by id."""
    # Class-wide counter used to hand out unique task ids.
    taskid: int = 0

    def __init__(self, target: Generator[Any, Any, Any]):
        Task.taskid += 1
        self.tid: int = Task.taskid  # unique id of this task
        self.target = target         # the coroutine being wrapped
        self.sendval = None          # value to feed the coroutine on resume

    def run(self):
        """Resume the coroutine, sending it the pending value."""
        return self.target.send(self.sendval)
if __name__ == '__main__':
    # Demo: drive a simple two-part generator through a Task.
    def foo() -> Generator[Any, Any, Any]:
        print(f"part 1")
        yield
        print(f"part 2")
        yield
    task = Task(foo())
    print(f"running foo()")
    task.run()
    print(f"resuming foo()")
    task.run()
| {
"content_hash": "a42285661549769f354e48dfc4cc86d7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 21.925925925925927,
"alnum_prop": 0.5422297297297297,
"repo_name": "ASMlover/study",
"id": "a08e626ec997d1e8d7d06b3767d6a49cfcb40289",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/coroutines/pyos1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3055440"
},
{
"name": "Batchfile",
"bytes": "4662"
},
{
"name": "Brainfuck",
"bytes": "571"
},
{
"name": "C",
"bytes": "13569580"
},
{
"name": "C#",
"bytes": "3959"
},
{
"name": "C++",
"bytes": "14741264"
},
{
"name": "CMake",
"bytes": "543917"
},
{
"name": "CSS",
"bytes": "11505"
},
{
"name": "Common Lisp",
"bytes": "114"
},
{
"name": "Emacs Lisp",
"bytes": "6042"
},
{
"name": "Go",
"bytes": "105203"
},
{
"name": "Groovy",
"bytes": "2907"
},
{
"name": "HTML",
"bytes": "911945"
},
{
"name": "Lex",
"bytes": "9370"
},
{
"name": "Lua",
"bytes": "32829"
},
{
"name": "Makefile",
"bytes": "1000611"
},
{
"name": "NASL",
"bytes": "3609"
},
{
"name": "NewLisp",
"bytes": "5805"
},
{
"name": "Perl",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2752752"
},
{
"name": "SWIG",
"bytes": "91"
},
{
"name": "Shell",
"bytes": "9993"
},
{
"name": "Vim script",
"bytes": "92204"
},
{
"name": "Yacc",
"bytes": "6278"
}
],
"symlink_target": ""
} |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.4.1"
__copyright__ = "Copyright (c) 2004-2015 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
# The "<>" operator below is deliberately invalid Python 3 syntax: importing
# this (Python 2 only) module under Python 3 raises a SyntaxError whose
# traceback surfaces the two explanatory strings. Do not "fix" it.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
    """
    This class defines the basic interface called by the tree builders.

    These methods will be called by the parser:
      reset()
      feed(markup)

    The tree builder may call these methods from its feed() implementation:
      handle_starttag(name, attrs) # See note about return value
      handle_endtag(name)
      handle_data(data) # Appends to the current data node
      endData(containerClass=NavigableString) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """
    # Name of the synthetic root node that wraps the whole document.
    ROOT_TAG_NAME = u'[document]'
    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']
    # ASCII whitespace characters: space, \n, \t, \f, \r.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nTo get rid of this warning, change this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, exclude_encodings=None,
                 **kwargs):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser."""
        # NOTE(review): this file is Python 2 only ("except Exception, e",
        # unicode/basestring below); keep it Py2-compatible.
        # Warn about (and discard) BS3-era keyword arguments that BS4 ignores.
        if 'convertEntities' in kwargs:
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")
        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")
        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")
        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")
        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. Suggest you use "
                "features='lxml' for HTML and features='lxml-xml' for "
                "XML.")
        # Accept renamed BS3 arguments, with a deprecation warning.
        def deprecated_argument(old_name, new_name):
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name))
                value = kwargs[old_name]
                del kwargs[old_name]
                return value
            return None
        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")
        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")
        # Anything left in kwargs at this point is an error.
        if len(kwargs) > 0:
            arg = kwargs.keys().pop()
            raise TypeError(
                "__init__() got an unexpected keyword argument '%s'" % arg)
        # Pick a tree builder from the requested features if none was given,
        # warning when the choice is ambiguous (no explicit parser name).
        if builder is None:
            original_features = features
            if isinstance(features, basestring):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))
            builder = builder_class()
            if not (original_features == builder.NAME or
                    original_features in builder.ALTERNATE_NAMES):
                if builder.is_xml:
                    markup_type = "XML"
                else:
                    markup_type = "HTML"
                warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
                    parser=builder.NAME,
                    markup_type=markup_type))
        self.builder = builder
        self.is_xml = builder.is_xml
        self.builder.soup = self
        self.parse_only = parse_only
        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256:
            # Print out warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # just in case that's what the user really wants.
            if (isinstance(markup, unicode)
                and not os.path.supports_unicode_filenames):
                possible_filename = markup.encode("utf8")
            else:
                possible_filename = markup
            is_file = False
            try:
                is_file = os.path.exists(possible_filename)
            except Exception, e:
                # This is almost certainly a problem involving
                # characters not valid in filenames on this
                # system. Just let it go.
                pass
            if is_file:
                if isinstance(markup, unicode):
                    markup = markup.encode("utf8")
                warnings.warn(
                    '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
            if markup[:5] == "http:" or markup[:6] == "https:":
                # TODO: This is ugly but I couldn't get it to work in
                # Python 3 otherwise.
                if ((isinstance(markup, bytes) and not b' ' in markup)
                    or (isinstance(markup, unicode) and not u' ' in markup)):
                    if isinstance(markup, unicode):
                        markup = markup.encode("utf8")
                    warnings.warn(
                        '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
        # The builder may yield several candidate (markup, encoding) decodings;
        # parse with the first one the parser accepts.
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
            self.builder.prepare_markup(
                markup, from_encoding, exclude_encodings=exclude_encodings)):
            self.reset()
            try:
                self._feed()
                break
            except ParserRejectedMarkup:
                pass
        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None
def __copy__(self):
return type(self)(self.encode(), builder=self.builder)
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and not self.builder.picklable:
del d['builder']
return d
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into a new string node."""
        if self.current_data:
            current_data = u''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '
            # Reset the data collector.
            self.current_data = []
            # Should we add this string to the tree at all?
            # (Skip top-level strings that a SoupStrainer doesn't match.)
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return
            o = containerClass(current_data)
            self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
    """Add an object to the parse tree.

    Links *o* into the doubly-linked element chain (document order) and
    sibling chain, defaulting the parent to the currently open tag.
    """
    parent = parent or self.currentTag
    previous_element = most_recent_element or self._most_recent_element

    next_element = previous_sibling = next_sibling = None
    if isinstance(o, Tag):
        # A Tag may already carry chain links (e.g. when re-parented);
        # preserve them rather than clobbering with None.
        next_element = o.next_element
        next_sibling = o.next_sibling
        previous_sibling = o.previous_sibling
        if not previous_element:
            previous_element = o.previous_element

    o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

    self._most_recent_element = o
    parent.contents.append(o)

    if parent.next_sibling:
        # This node is being inserted into an element that has
        # already been parsed. Deal with any dangling references.
        index = parent.contents.index(o)
        if index == 0:
            previous_element = parent
            previous_sibling = None
        else:
            previous_element = previous_sibling = parent.contents[index-1]
        if index == len(parent.contents)-1:
            next_element = parent.next_sibling
            next_sibling = None
        else:
            next_element = next_sibling = parent.contents[index+1]

        # Rewire both chains around o in both directions.
        o.previous_element = previous_element
        if previous_element:
            previous_element.next_element = o
        o.next_element = next_element
        if next_element:
            next_element.previous_element = o
        o.next_sibling = next_sibling
        if next_sibling:
            next_sibling.previous_sibling = o
        o.previous_sibling = previous_sibling
        if previous_sibling:
            previous_sibling.next_sibling = o
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
    """Pops the tag stack up to and including the most recent
    instance of the given tag. If inclusivePop is false, pops the tag
    stack up to but *not* including the most recent instance of
    the given tag.

    :param name: Name of the tag to pop to.
    :param nsprefix: Namespace prefix that must also match.
    :param inclusivePop: If True, the matching tag itself is popped too.
    :return: The most recently popped tag, or None if nothing was popped.
    """
    if name == self.ROOT_TAG_NAME:
        # The BeautifulSoup object itself can never be popped.
        return

    most_recently_popped = None

    # Walk from the top of the stack down (index 0 is the soup itself
    # and is never popped), popping until the matching tag is found.
    stack_size = len(self.tagStack)
    for i in range(stack_size - 1, 0, -1):
        t = self.tagStack[i]
        if (name == t.name and nsprefix == t.prefix):
            if inclusivePop:
                most_recently_popped = self.popTag()
            break
        most_recently_popped = self.popTag()

    return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
    """Push a start tag on to the stack.

    If this method returns None, the tag was rejected by the
    SoupStrainer. You should proceed as if the tag had not occurred
    in the document. For instance, if this was a self-closing tag,
    don't call handle_endtag.
    """
    # Flush any text collected so far before opening a new element.
    self.endData()

    # Reject top-level tags that the SoupStrainer filters out.
    if (self.parse_only and len(self.tagStack) <= 1
        and (self.parse_only.text
             or not self.parse_only.search_tag(name, attrs))):
        return None

    tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
              self.currentTag, self._most_recent_element)
    if tag is None:
        # Defensive: Tag() is not expected to return None.
        return tag
    # Link the new tag into the document-order element chain.
    if self._most_recent_element:
        self._most_recent_element.next_element = tag
    self._most_recent_element = tag
    self.pushTag(tag)
    return tag
def handle_endtag(self, name, nsprefix=None):
    # Flush pending text, then close the most recent matching open tag.
    self.endData()
    self._popToTag(name, nsprefix)
def handle_data(self, data):
    # Buffer a run of text; endData() later joins and inserts it.
    self.current_data.append(data)
def decode(self, pretty_print=False,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal"):
    """Returns a string or Unicode representation of this document.
    To get Unicode, pass None for encoding.

    :param pretty_print: If true, indent one level per nested tag.
    :param eventual_encoding: Encoding advertised in the XML
        declaration (XML documents only); None omits the attribute.
    :param formatter: Name of the output formatter to apply.
    """
    if self.is_xml:
        # Print the XML declaration
        encoding_part = ''
        if eventual_encoding != None:
            encoding_part = ' encoding="%s"' % eventual_encoding
        prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
    else:
        prefix = u''
    if not pretty_print:
        indent_level = None
    else:
        indent_level = 0
    return prefix + super(BeautifulSoup, self).decode(
        indent_level, eventual_encoding, formatter)
# Aliases to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""

    def __init__(self, *args, **kwargs):
        # Force the XML feature set, then delegate to BeautifulSoup.
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
    """Signal used to halt parsing before the end of the document."""
class FeatureNotFound(ValueError):
    """ValueError subclass raised for an unrecognized feature request."""
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    # Use print as a function call: with a single argument this is valid
    # on both Python 2 (parentheses are grouping) and Python 3, whereas
    # the bare print statement is a SyntaxError on Python 3.
    print(soup.prettify())
| {
"content_hash": "824e6c2032f610f838604a55c4838810",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 467,
"avg_line_length": 39.02564102564103,
"alnum_prop": 0.5919842312746386,
"repo_name": "nasa/CrisisMappingToolkit",
"id": "a5b89462bc56d9b0da0fd35c92e11d7f61d23955",
"size": "18264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_engine/libs/bs4/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2150"
},
{
"name": "JavaScript",
"bytes": "4257"
},
{
"name": "Jupyter Notebook",
"bytes": "18106"
},
{
"name": "Python",
"bytes": "849303"
}
],
"symlink_target": ""
} |
"""
List all Windows services installed.
$ python scripts/winservices.py
AeLookupSvc (Application Experience)
status: stopped, start: manual, username: localSystem, pid: None
binpath: C:\Windows\system32\svchost.exe -k netsvcs
ALG (Application Layer Gateway Service)
status: stopped, start: manual, username: NT AUTHORITY\LocalService, pid: None
binpath: C:\Windows\System32\alg.exe
APNMCP (Ask Update Service)
status: running, start: automatic, username: LocalSystem, pid: 1108
binpath: "C:\Program Files (x86)\AskPartnerNetwork\Toolbar\apnmcp.exe"
AppIDSvc (Application Identity)
status: stopped, start: manual, username: NT Authority\LocalService, pid: None
binpath: C:\Windows\system32\svchost.exe -k LocalServiceAndNoImpersonation
Appinfo (Application Information)
status: stopped, start: manual, username: LocalSystem, pid: None
binpath: C:\Windows\system32\svchost.exe -k netsvcs
...
"""
import os
import sys
import psutil
# Bail out immediately on non-Windows platforms: the Windows-service
# APIs used below only exist when os.name == 'nt'.
if os.name != 'nt':
    sys.exit("platform not supported (Windows only)")
def main():
    """Print name, status and binary path of every installed service."""
    for svc in psutil.win_service_iter():
        info = svc.as_dict()
        print("%r (%r)" % (info['name'], info['display_name']))
        print("status: %s, start: %s, username: %s, pid: %s" % (
            info['status'], info['start_type'], info['username'], info['pid']))
        print("binpath: %s" % info['binpath'])
        print("")
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| {
"content_hash": "fd1f45a4e57a3d2246fce359a5a7e962",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 28.64,
"alnum_prop": 0.6941340782122905,
"repo_name": "HongxuChen/dotfiles",
"id": "8cc5ed4e9fdccf0bdeff97dd3e249ba75e39db91",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_bin/psutils/winservices.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21849"
},
{
"name": "GDB",
"bytes": "47083"
},
{
"name": "JavaScript",
"bytes": "3526"
},
{
"name": "Lua",
"bytes": "281"
},
{
"name": "Python",
"bytes": "340891"
},
{
"name": "Scala",
"bytes": "3569"
},
{
"name": "Scheme",
"bytes": "3812"
},
{
"name": "Shell",
"bytes": "180052"
},
{
"name": "TeX",
"bytes": "13866"
},
{
"name": "Vim script",
"bytes": "20151"
}
],
"symlink_target": ""
} |
from click.testing import CliRunner
from mock import Mock, patch
from sigopt.cli import cli
class TestClusterCreateCli(object):
    """CLI test: `sigopt cluster create` succeeds given a cluster.yml."""

    def test_cluster_create(self):
        services = Mock()
        runner = CliRunner()
        service_bag_patch = patch(
            'sigopt.orchestrate.controller.OrchestrateServiceBag',
            return_value=services,
        )
        with runner.isolated_filesystem():
            with service_bag_patch:
                # An empty cluster.yml is enough for the command to run.
                open("cluster.yml", "w").close()
                result = runner.invoke(cli, ["cluster", "create"])
                assert result.exit_code == 0
| {
"content_hash": "d5f11a8517f33821d284a96964a512b9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 90,
"avg_line_length": 30,
"alnum_prop": 0.69375,
"repo_name": "sigopt/sigopt-python",
"id": "b4c7df2e25df81ca01eef8849e07d008cde26d64",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/cli/test_cluster_create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2411"
},
{
"name": "Makefile",
"bytes": "545"
},
{
"name": "Python",
"bytes": "542280"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
"""
Class for interacting with our PXE server and Access Control
(c) 2015 Massachusetts Institute of Technology
"""
import socket
import logging
logger = logging.getLogger(__name__)
import lophi.globals as G
class PXEServer:
    """Client for our PXE server and its access-control list (ACL).

    Every operation opens a short-lived TCP connection to the ACL port
    and sends a single space-separated command string.
    """

    def __init__(self, ip_address, acl_port=G.PXE_ACL_PORT):
        """
        Initialize our interface to our PXE server

        @param ip_address: IP address of our PXE server
        @param acl_port: Port to communicate to the ACL server on.
        """
        self.ip_address = ip_address
        self.acl_port = acl_port

    def __is_valid_mac(self, mac_address):
        """
        Check to see if the mac address given is a valid MAC format.

        Accepts the common notation of six groups of two hex digits
        separated consistently by ':' or '-', e.g. '01:23:45:67:89:ab'.

        @param mac_address: MAC address to check
        @return: True/False
        """
        import re  # local import: the rest of the module does not need re
        if not isinstance(mac_address, str):
            return False
        # Six pairs of hex digits; the first separator fixes the style.
        pattern = r'^[0-9A-Fa-f]{2}([:-])(?:[0-9A-Fa-f]{2}\1){4}[0-9A-Fa-f]{2}$'
        return re.match(pattern, mac_address) is not None

    def set_pxe_conf(self, mac_address, pxe_conf):
        """
        Set which PXE configuration this MAC should boot from

        @param mac_address: MAC of the system we are adding
        @param pxe_conf: Name of a config to boot clonezilla. These
        files are the ones stored in pxelinux.cfg. If not file exists,
        it will scan the samba images directory and auto-generate a config
        if the image exist.
        @return: True if the request was sent, False otherwise.
        """
        if not self.__is_valid_mac(mac_address):
            logger.error("Mac address (%s) is an invalid format."%mac_address)
            return False

        # add mac address to PXE server ACL
        msg = " ".join([G.PXE_SET_CONF, mac_address, pxe_conf])

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(10)
        try:
            sock.connect((self.ip_address, self.acl_port))
            logger.debug("Sending request to boot %s from %s. (%s:%d)"%(mac_address,
                                                                        pxe_conf,
                                                                        self.ip_address,
                                                                        self.acl_port))
            sock.send(msg)
        except Exception: # probably get here b/c of socket timeout
            logger.error("Problem sending request to boot %s from %s. (%s:%d)"%(mac_address,
                                                                                pxe_conf,
                                                                                self.ip_address,
                                                                                self.acl_port))
            # Bug fix: this path previously fell through and returned
            # True; report the failure like add_mac() does.
            return False
        finally:
            sock.close()

        return True

    def add_mac(self, mac_address):
        """
        Add a MAC address to our access-control list so that the machine
        will boot from PXE.

        @param mac_address: MAC Address to be added to PXE ACL.
        @return: True on success, False on failure.
        """
        if not self.__is_valid_mac(mac_address):
            logger.error("Mac address (%s) is an invalid format."%mac_address)
            return False

        # add mac address to PXE server ACL
        msg = " ".join([G.PXE_ADD_ACL, mac_address])

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(10)
        try:
            sock.connect((self.ip_address, self.acl_port))
            logger.debug("Sending request to add %s to %s:%d."%(mac_address,self.ip_address,self.acl_port))
            sock.send(msg)
        except Exception: # probably get here b/c of socket timeout
            logger.error("Problem sending request to add %s to %s:%d."%(mac_address,self.ip_address,self.acl_port))
            return False
        finally:
            sock.close()

        return True

    def get_ip(self, mac_address):
        """
        Query our DHCP/PXE server to resolve an IP address from a given MAC

        @param mac_address: MAC address to lookup at DHCP server
        @return: IP address string on success; None if the server did
            not answer or replied with the no-IP sentinel.
        """
        # Send command to DHCP server
        msg = " ".join([G.PXE_GET_IP, mac_address])

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(10)

        # get response; a timeout simply leaves resp as None
        resp = None
        try:
            sock.connect((self.ip_address, self.acl_port))
            sock.send(msg)
            resp = sock.recv(512)
        except Exception: # probably get here b/c of socket timeout
            pass
        finally:
            sock.close()

        if resp is None or resp == G.PXE_NO_IP_RESP:
            logger.error("Failed to get IP address for machine. PXE Server down? (%s)" % mac_address)
            return None
        else:
            # Cache the answer on the instance before returning it.
            self.ip_addr = resp
            return resp
"content_hash": "f18f4009e10cf8e908c6c952444c25f1",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 115,
"avg_line_length": 35.35507246376812,
"alnum_prop": 0.49292887886862063,
"repo_name": "mit-ll/LO-PHI",
"id": "d6851765c1e20d6b64777f9741e7f74b91048130",
"size": "4879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lophi-net-services/lophinet/pxeserver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "58723"
},
{
"name": "Elixir",
"bytes": "18208"
},
{
"name": "Emacs Lisp",
"bytes": "1368"
},
{
"name": "Groff",
"bytes": "1900"
},
{
"name": "M4",
"bytes": "2284"
},
{
"name": "Makefile",
"bytes": "64810"
},
{
"name": "Protocol Buffer",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "1220515"
},
{
"name": "Shell",
"bytes": "23976"
}
],
"symlink_target": ""
} |
from .auth_endpoint import Auth
from .data_acceleration_report_endpoint import DataAccelerationReport
from .data_alert_endpoint import DataAlerts
from .databases_endpoint import Databases
from .datasources_endpoint import Datasources
from .endpoint import Endpoint, QuerysetEndpoint
from .exceptions import (
ServerResponseError,
MissingRequiredFieldError,
ServerInfoEndpointNotFoundError,
)
from .favorites_endpoint import Favorites
from .fileuploads_endpoint import Fileuploads
from .flow_runs_endpoint import FlowRuns
from .flows_endpoint import Flows
from .groups_endpoint import Groups
from .jobs_endpoint import Jobs
from .metadata_endpoint import Metadata
from .metrics_endpoint import Metrics
from .projects_endpoint import Projects
from .schedules_endpoint import Schedules
from .server_info_endpoint import ServerInfo
from .sites_endpoint import Sites
from .subscriptions_endpoint import Subscriptions
from .tables_endpoint import Tables
from .tasks_endpoint import Tasks
from .users_endpoint import Users
from .views_endpoint import Views
from .webhooks_endpoint import Webhooks
from .workbooks_endpoint import Workbooks
| {
"content_hash": "26f2ea732639dacb2de619f22fb9626f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 38.13333333333333,
"alnum_prop": 0.8409090909090909,
"repo_name": "tableau/server-client-python",
"id": "e14bb8cff1aa14336769aaa9b4ad3084997afeeb",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tableauserverclient/server/endpoint/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "858778"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
"""
Defines the Intermediate Representation that is generated by the frontend and
fed to the backends.
The goal of this module is to define all data types that are common to the
languages and serialization formats we want to support.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, deque
import copy
import datetime
import math
import numbers
import re
import six
from ..frontend.exception import InvalidSpec
from ..frontend.ast import (
AstExampleField,
AstExampleRef,
AstTagRef,
)
_MYPY = False
if _MYPY:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
class ParameterError(Exception):
    """Raised when a data type is parameterized with a bad type or value."""
def generic_type_name(v):
    """
    Return a descriptive type name that isn't Python specific. For example, an
    int type will return 'integer' rather than 'int'.
    """
    # The order of checks matters: example references first, then
    # Integral before Real (integers are also real numbers).
    if isinstance(v, AstExampleRef):
        return "reference"
    if isinstance(v, numbers.Integral):
        return 'integer'
    if isinstance(v, numbers.Real):
        return 'float'
    if isinstance(v, (tuple, list)):
        return 'list'
    if isinstance(v, six.string_types):
        return 'string'
    if v is None:
        return 'null'
    return type(v).__name__
class DataType(object):
    """Abstract base class for every data type in the IR."""

    __metaclass__ = ABCMeta

    def __init__(self):
        # Exists only so that introspection can rely on every data type
        # having an explicit __init__.
        pass

    @property
    def name(self):
        """An easy-to-read name for the type (its class name)."""
        return type(self).__name__

    @abstractmethod
    def check(self, val):
        """
        Validate that *val* (a Python object translated from a spec) is
        acceptable for this type.

        Returns nothing; raises ValueError on an invalid value.
        """

    @abstractmethod
    def check_example(self, ex_field):
        """
        Validate an example field (AstExampleField) from a spec.

        Returns nothing; raises InvalidSpec on an invalid example.
        """

    def __repr__(self):
        return self.name
class Primitive(DataType):
    # A data type that is not built out of other data types.
    # pylint: disable=abstract-method
    def check_attr_repr(self, attr_field):
        """Validate an attribute value and return its Python form.

        A ValueError from check() is re-raised as InvalidSpec anchored
        at the attribute's source location.
        """
        try:
            self.check(attr_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], attr_field.lineno, attr_field.path)
        return attr_field.value
class Composite(DataType):
    """
    Composite types are any data type which can be constructed using primitive
    data types and other composite types.
    """
    # pylint: disable=abstract-method
    pass
class Nullable(Composite):
    """Wraps another data type and additionally permits null values."""

    def __init__(self, data_type):
        super(Nullable, self).__init__()
        self.data_type = data_type

    def check(self, val):
        # None always passes; anything else defers to the wrapped type.
        if val is None:
            return None
        return self.data_type.check(val)

    def check_example(self, ex_field):
        if ex_field.value is None:
            return None
        return self.data_type.check_example(ex_field)

    def check_attr_repr(self, attr_field):
        if attr_field.value is None:
            return None
        return self.data_type.check_attr_repr(attr_field)
class Void(Primitive):
    """The type whose only legal value is null."""

    def check(self, val):
        if val is not None:
            raise ValueError('void type can only be null')

    def check_example(self, ex_field):
        if ex_field.value is not None:
            raise InvalidSpec('example of void type must be null',
                              ex_field.lineno, ex_field.path)

    def check_attr_repr(self, attr_field):
        # A void attribute has no meaningful Python representation.
        raise NotImplementedError
class Bytes(Primitive):
    """Byte strings; text values are accepted and encoded as UTF-8."""

    def check(self, val):
        if not isinstance(val, (bytes, six.text_type)):
            raise ValueError('%r is not valid bytes' % val)

    def check_example(self, ex_field):
        if not isinstance(ex_field.value, (bytes, six.text_type)):
            raise InvalidSpec("'%s' is not valid bytes" % ex_field.value,
                              ex_field.lineno, ex_field.path)

    def check_attr_repr(self, attr_field):
        try:
            self.check(attr_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], attr_field.lineno, attr_field.path)
        raw = attr_field.value
        # Normalize text to UTF-8-encoded bytes.
        if isinstance(raw, six.text_type):
            return raw.encode('utf-8')
        return raw
class _BoundedInteger(Primitive):
    """
    When extending, specify 'minimum' and 'maximum' as class variables. This
    is the range of values supported by the data type.
    """

    # See <https://github.com/python/mypy/issues/1833>
    minimum = None  # type: typing.Optional[int]
    maximum = None  # type: typing.Optional[int]

    def __init__(self, min_value=None, max_value=None):
        """
        A more restrictive minimum or maximum value can be specified than the
        range inherent to the defined type.

        :raises ParameterError: If a bound is not an integer or lies
            outside the type's inherent range.
        """
        super(_BoundedInteger, self).__init__()
        if min_value is not None:
            if not isinstance(min_value, numbers.Integral):
                raise ParameterError('min_value must be an integral number')
            if min_value < self.minimum:
                raise ParameterError('min_value cannot be less than the '
                                     'minimum value for this type (%s < %s)' %
                                     (min_value, self.minimum))
        if max_value is not None:
            if not isinstance(max_value, numbers.Integral):
                raise ParameterError('max_value must be an integral number')
            if max_value > self.maximum:
                # Bug fix: the message previously printed '<' even though
                # this branch fires on a greater-than violation.
                raise ParameterError('max_value cannot be greater than the '
                                     'maximum value for this type (%s > %s)' %
                                     (max_value, self.maximum))
        self.min_value = min_value
        self.max_value = max_value

    def check(self, val):
        """Raise ValueError unless *val* is an integer within all bounds."""
        if not isinstance(val, numbers.Integral):
            raise ValueError('%s is not a valid integer' %
                             generic_type_name(val))
        if not (self.minimum <= val <= self.maximum):
            raise ValueError('%d is not within range [%r, %r]'
                             % (val, self.minimum, self.maximum))
        if self.min_value is not None and val < self.min_value:
            raise ValueError('%d is less than %d' %
                             (val, self.min_value))
        if self.max_value is not None and val > self.max_value:
            raise ValueError('%d is greater than %d' %
                             (val, self.max_value))

    def check_example(self, ex_field):
        """Validate an example value, reporting errors as InvalidSpec."""
        try:
            self.check(ex_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], ex_field.lineno, ex_field.path)

    def __repr__(self):
        return '%s()' % self.name
class Int32(_BoundedInteger):
    # Signed 32-bit integer.
    minimum = -2**31
    maximum = 2**31 - 1
class UInt32(_BoundedInteger):
    # Unsigned 32-bit integer.
    minimum = 0
    maximum = 2**32 - 1
class Int64(_BoundedInteger):
    # Signed 64-bit integer.
    minimum = -2**63
    maximum = 2**63 - 1
class UInt64(_BoundedInteger):
    # Unsigned 64-bit integer.
    minimum = 0
    maximum = 2**64 - 1
class _BoundedFloat(Primitive):
    """
    When extending, optionally specify 'minimum' and 'maximum' as class
    variables. This is the range of values supported by the data type. For
    a float64, there is no need to specify a minimum and maximum since Python's
    native float implementation is a float64/double. Therefore, any Python
    float will pass the data type range check automatically.
    """

    # See <https://github.com/python/mypy/issues/1833>
    minimum = None  # type: typing.Optional[float]
    maximum = None  # type: typing.Optional[float]

    def __init__(self, min_value=None, max_value=None):
        """
        A more restrictive minimum or maximum value can be specified than the
        range inherent to the defined type.

        :raises ParameterError: If a bound is not a real number, cannot be
            represented as a float, or lies outside the type's range.
        """
        super(_BoundedFloat, self).__init__()
        if min_value is not None:
            if not isinstance(min_value, numbers.Real):
                raise ParameterError('min_value must be a real number')
            if not isinstance(min_value, float):
                try:
                    min_value = float(min_value)
                except OverflowError:
                    raise ParameterError('min_value is too small for a float')
            if self.minimum is not None and min_value < self.minimum:
                raise ParameterError('min_value cannot be less than the '
                                     'minimum value for this type (%f < %f)' %
                                     (min_value, self.minimum))
        if max_value is not None:
            if not isinstance(max_value, numbers.Real):
                raise ParameterError('max_value must be a real number')
            if not isinstance(max_value, float):
                try:
                    max_value = float(max_value)
                except OverflowError:
                    raise ParameterError('max_value is too large for a float')
            if self.maximum is not None and max_value > self.maximum:
                # Bug fix: the message previously printed '<' even though
                # this branch fires on a greater-than violation.
                raise ParameterError('max_value cannot be greater than the '
                                     'maximum value for this type (%f > %f)' %
                                     (max_value, self.maximum))
        self.min_value = min_value
        self.max_value = max_value

    def check(self, val):
        """Raise ValueError unless *val* is a finite float within bounds."""
        if not isinstance(val, numbers.Real):
            raise ValueError('%s is not a valid real number' %
                             generic_type_name(val))
        if not isinstance(val, float):
            try:
                val = float(val)
            except OverflowError:
                raise ValueError('%r is too large for float' % val)
        if math.isnan(val) or math.isinf(val):
            # Parser doesn't support NaN or Inf yet.
            raise ValueError('%f values are not supported' % val)
        if self.minimum is not None and val < self.minimum:
            raise ValueError('%f is less than %f' %
                             (val, self.minimum))
        if self.maximum is not None and val > self.maximum:
            raise ValueError('%f is greater than %f' %
                             (val, self.maximum))
        if self.min_value is not None and val < self.min_value:
            raise ValueError('%f is less than %f' %
                             (val, self.min_value))
        if self.max_value is not None and val > self.max_value:
            # Bug fix: this message previously interpolated self.min_value,
            # producing a nonsensical error for max-bound violations.
            raise ValueError('%f is greater than %f' %
                             (val, self.max_value))

    def check_example(self, ex_field):
        """Validate an example value, reporting errors as InvalidSpec."""
        try:
            self.check(ex_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], ex_field.lineno, ex_field.path)

    def __repr__(self):
        return '%s()' % self.name
class Float32(_BoundedFloat):
    # Single-precision float.
    # Maximum and minimums from the IEEE 754-1985 standard
    minimum = -3.40282 * 10**38
    maximum = 3.40282 * 10**38
class Float64(_BoundedFloat):
    # Double-precision float; Python's native float already covers the
    # full range, so no explicit bounds are needed.
    pass
class Boolean(Primitive):
    """True/False values."""

    def check(self, val):
        if not isinstance(val, bool):
            raise ValueError('%r is not a valid boolean' % val)

    def check_example(self, ex_field):
        value = ex_field.value
        if not isinstance(value, bool):
            raise InvalidSpec('%r is not a valid boolean' % value,
                              ex_field.lineno, ex_field.path)
class String(Primitive):
    """A string with optional length bounds and an optional regex pattern."""

    def __init__(self, min_length=None, max_length=None, pattern=None):
        """
        :param min_length: Smallest accepted length (must be >= 0).
        :param max_length: Largest accepted length (must be > 0).
        :param pattern: Regex (re module syntax) values must match.
        :raises ParameterError: If any parameter is malformed.
        """
        super(String, self).__init__()
        if min_length is not None:
            if not isinstance(min_length, numbers.Integral):
                raise ParameterError('min_length must be an integral number')
            if min_length < 0:
                raise ParameterError('min_length must be >= 0')
        if max_length is not None:
            if not isinstance(max_length, numbers.Integral):
                raise ParameterError('max_length must be an integral number')
            if max_length < 1:
                raise ParameterError('max_length must be > 0')
        if min_length and max_length:
            if max_length < min_length:
                raise ParameterError('max_length must be >= min_length')

        self.min_length = min_length
        self.max_length = max_length
        self.pattern = pattern
        self.pattern_re = None

        if pattern:
            if not isinstance(pattern, six.string_types):
                raise ParameterError('pattern must be a string')
            try:
                # Compile once up front so check() is cheap and a bad
                # pattern fails at definition time.
                self.pattern_re = re.compile(pattern)
            except re.error as e:
                raise ParameterError(
                    'could not compile regex pattern {!r}: {}'.format(
                        pattern, e.args[0]))

    def check(self, val):
        """Raise ValueError unless *val* is a string meeting all bounds."""
        if not isinstance(val, six.string_types):
            raise ValueError('%s is not a valid string' %
                             generic_type_name(val))
        elif self.max_length is not None and len(val) > self.max_length:
            raise ValueError("'%s' has more than %d character(s)"
                             % (val, self.max_length))
        elif self.min_length is not None and len(val) < self.min_length:
            raise ValueError("'%s' has fewer than %d character(s)"
                             % (val, self.min_length))
        elif self.pattern and not self.pattern_re.match(val):
            raise ValueError("'%s' did not match pattern '%s'"
                             % (val, self.pattern))

    def check_example(self, ex_field):
        """Validate an example value, reporting errors as InvalidSpec."""
        try:
            self.check(ex_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], ex_field.lineno, ex_field.path)
class Timestamp(Primitive):
    """Date/time values serialized as strings in a fixed strftime format."""

    def __init__(self, fmt):
        """
        :param fmt: strftime/strptime format string used to parse values.
        :raises ParameterError: If *fmt* is not a string.
        """
        super(Timestamp, self).__init__()
        if not isinstance(fmt, six.string_types):
            raise ParameterError('format must be a string')
        self.format = fmt

    def check(self, val):
        """Raise ValueError unless *val* is a string in self.format."""
        if not isinstance(val, six.string_types):
            raise ValueError('timestamp must be specified as a string')

        # Raises a ValueError if val is the incorrect format
        datetime.datetime.strptime(val, self.format)

    def check_example(self, ex_field):
        try:
            self.check(ex_field.value)
        except ValueError as e:
            raise InvalidSpec(e.args[0], ex_field.lineno, ex_field.path)

    def check_attr_repr(self, attr_field):
        """Validate and parse an attribute value into a datetime object."""
        try:
            self.check(attr_field.value)
        except ValueError as e:
            msg = e.args[0]
            if isinstance(msg, six.binary_type):
                # For Python 2 compatibility.
                msg = msg.decode('utf-8')
            raise InvalidSpec(msg, attr_field.lineno, attr_field.path)
        return datetime.datetime.strptime(attr_field.value, self.format)
class List(Composite):
    """Homogeneous list of another data type with optional size bounds."""

    def __init__(self, data_type, min_items=None, max_items=None):
        super(List, self).__init__()
        self.data_type = data_type

        if min_items is not None and min_items < 0:
            raise ParameterError('min_items must be >= 0')
        if max_items is not None and max_items < 1:
            raise ParameterError('max_items must be > 0')
        if min_items and max_items and max_items < min_items:
            raise ParameterError('max_length must be >= min_length')

        self.min_items = min_items
        self.max_items = max_items

    def check(self, val):
        raise NotImplementedError

    def check_example(self, ex_field):
        try:
            self._check_list_container(ex_field.value)
            for item in ex_field.value:
                # Re-wrap each element so nested errors carry the same
                # source location as the containing field.
                item_field = AstExampleField(
                    ex_field.path,
                    ex_field.lineno,
                    ex_field.lexpos,
                    ex_field.name,
                    item)
                self.data_type.check_example(item_field)
        except ValueError as e:
            raise InvalidSpec(e.args[0], ex_field.lineno, ex_field.path)

    def _check_list_container(self, val):
        # Validate the container itself: type, then upper and lower bounds.
        if not isinstance(val, list):
            raise ValueError('%s is not a valid list' % generic_type_name(val))
        if self.max_items is not None and len(val) > self.max_items:
            raise ValueError('list has more than %s item(s)' % self.max_items)
        if self.min_items is not None and len(val) < self.min_items:
            raise ValueError('list has fewer than %s item(s)' % self.min_items)
class Map(Composite):
    """Mapping from String keys to values of another data type."""

    def __init__(self, key_data_type, value_data_type):
        super(Map, self).__init__()
        if not isinstance(key_data_type, String):
            raise ParameterError("Only String primitives are supported as key types.")
        self.key_data_type = key_data_type
        self.value_data_type = value_data_type

    def check(self, val):
        raise NotImplementedError

    def check_example(self, ex_field):
        # NOTE(review): unlike other types, a non-dict example raises a
        # bare ValueError here instead of InvalidSpec — confirm callers
        # expect that before changing it.
        if not isinstance(ex_field.value, dict):
            raise ValueError("%s is not a valid map" % generic_type_name(ex_field.value))
        for key, value in ex_field.value.items():
            self.key_data_type.check_example(self._make_ex_field(ex_field, key))
            self.value_data_type.check_example(self._make_ex_field(ex_field, value))

    def _make_ex_field(self, ex_field, value):
        # Clone the field's source location around a single key or value.
        return AstExampleField(
            ex_field.path,
            ex_field.lineno,
            ex_field.lexpos,
            ex_field.name,
            value)
def doc_unwrap(raw_doc):
    """
    Applies two transformations to raw_doc:
    1. N consecutive newlines are converted into N-1 newlines.
    2. A lone newline is converted to a space, which basically unwraps text.

    Returns a new string, or None if the input was None.
    """
    if raw_doc is None:
        return None

    chunks = []
    newline_run = 0
    # Leading and trailing whitespace never survives the unwrap.
    for ch in raw_doc.strip():
        if ch == '\n':
            newline_run += 1
            # The first newline of a run is dropped (or later becomes a
            # space); every additional one is kept.
            if newline_run > 1:
                chunks.append(ch)
        else:
            if newline_run == 1:
                chunks.append(' ')
            newline_run = 0
            chunks.append(ch)
    return ''.join(chunks)
class Field(object):
    """
    Represents a field in a composite type.
    """

    def __init__(self,
                 name,
                 data_type,
                 doc,
                 ast_node):
        """
        Creates a new Field.

        :param str name: Name of the field.
        :param Type data_type: The type of variable for of this field.
        :param str doc: Documentation for the field.
        :param ast_node: Raw field definition from the parser.
        :type ast_node: stone.frontend.ast.AstField
        """
        self.name = name
        self.data_type = data_type
        self.raw_doc = doc
        self.doc = doc_unwrap(doc)
        self._ast_node = ast_node
        # Annotation state; populated by set_annotations().
        self.redactor = None
        self.omitted_caller = None
        self.deprecated = None
        self.preview = None

    def set_annotations(self, annotations):
        """Apply spec annotations (Deprecated/Omitted/Preview/Redacted),
        rejecting duplicates and conflicting combinations.

        :raises InvalidSpec: On a duplicate, conflicting, or unknown
            annotation.
        """
        if not annotations:
            return

        for annotation in annotations:
            if isinstance(annotation, Deprecated):
                if self.deprecated:
                    raise InvalidSpec("Deprecated value already set as %r." %
                                      str(self.deprecated), self._ast_node.lineno)
                if self.preview:
                    raise InvalidSpec("'Deprecated' and 'Preview' can\'t both be set.",
                                      self._ast_node.lineno)
                self.deprecated = True
                self.doc = 'Field is deprecated. {}'.format(self.doc)
            elif isinstance(annotation, Omitted):
                if self.omitted_caller:
                    raise InvalidSpec("Omitted caller already set as %r." %
                                      str(self.omitted_caller), self._ast_node.lineno)
                self.omitted_caller = annotation.omitted_caller
                self.doc = 'Field is only returned for "{}" callers. {}'.format(
                    str(self.omitted_caller), self.doc)
            elif isinstance(annotation, Preview):
                if self.preview:
                    raise InvalidSpec("Preview value already set as %r." %
                                      str(self.preview), self._ast_node.lineno)

                if self.deprecated:
                    raise InvalidSpec("'Deprecated' and 'Preview' can\'t both be set.",
                                      self._ast_node.lineno)

                self.preview = True
                self.doc = 'Field is in preview mode - do not rely on in production. {}'.format(
                    self.doc
                )
            elif isinstance(annotation, Redacted):
                # Make sure we don't set multiple conflicting annotations on one field
                if self.redactor:
                    raise InvalidSpec("Redactor already set as %r." %
                                      str(self.redactor), self._ast_node.lineno)
                self.redactor = annotation
            else:
                raise InvalidSpec(
                    'Annotation %r not recognized for field.' % annotation, self._ast_node.lineno)

    def __repr__(self):
        return 'Field(%r, %r)' % (self.name,
                                  self.data_type)
class StructField(Field):
    """
    Represents a field of a struct.
    """

    def __init__(self,
                 name,
                 data_type,
                 doc,
                 ast_node):
        """
        Creates a new Field.

        :param str name: Name of the field.
        :param Type data_type: The type of variable for of this field.
        :param str doc: Documentation for the field.
        :param ast_node: Raw field definition from the parser.
        :type ast_node: stone.frontend.ast.AstField
        """
        super(StructField, self).__init__(name, data_type, doc, ast_node)
        # Whether the spec supplied a default value for this field.
        self.has_default = False
        self._default = None

    def set_default(self, default):
        self.has_default = True
        self._default = default

    @property
    def default(self):
        # Only valid after set_default() has been called.
        if not self.has_default:
            raise Exception('Type has no default')
        else:
            return self._default

    def check_attr_repr(self, attr):
        """Resolve an attribute value for this field.

        Falls back to the field's default, then to None for nullable
        types; raises KeyError if the attribute is required but absent.
        """
        if attr is not None:
            attr = self.data_type.check_attr_repr(attr)
        if attr is None:
            if self.has_default:
                return self.default
            _, unwrapped_nullable, _ = unwrap(self.data_type)
            if unwrapped_nullable:
                return None
            else:
                raise KeyError(self.name)
        return attr

    def __repr__(self):
        return 'StructField(%r, %r, %r)' % (self.name,
                                            self.data_type,
                                            self.omitted_caller)
class UnionField(Field):
    """
    Represents a field of a union.
    """

    def __init__(self, name, data_type, doc, ast_node, catch_all=False):
        """Same construction as Field, plus whether this variant is the
        union's catch-all."""
        super(UnionField, self).__init__(name, data_type, doc, ast_node)
        self.catch_all = catch_all

    def __repr__(self):
        return 'UnionField({!r}, {!r}, {!r}, {!r})'.format(
            self.name, self.data_type, self.catch_all, self.omitted_caller)
class UserDefined(Composite):
    """
    These are types that are defined directly in specs.
    """
    # Label used for a type's canonical example.
    DEFAULT_EXAMPLE_LABEL = 'default'
    def __init__(self, name, namespace, ast_node):
        """
        When this is instantiated, the type is treated as a forward reference.
        Only when :meth:`set_attributes` is called is the type considered to
        be fully defined.

        :param str name: Name of type.
        :param stone.ir.Namespace namespace: The namespace this type is
            defined in.
        :param ast_node: Raw type definition from the parser.
        :type ast_node: stone.frontend.ast.AstTypeDef
        """
        super(UserDefined, self).__init__()
        self._name = name
        self.namespace = namespace
        self._ast_node = ast_node
        # True until set_attributes() fully defines the type.
        self._is_forward_ref = True
        # All of the below are populated by set_attributes().
        self.raw_doc = None
        self.doc = None
        self.fields = None
        self.parent_type = None
        self._raw_examples = None
        self._examples = None
        self._fields_by_name = None
    def set_attributes(self, doc, fields, parent_type=None):
        """
        Fields are specified as a list so that order is preserved for display
        purposes only. (Might be used for certain serialization formats...)

        :param str doc: Description of type.
        :param list(Field) fields: Ordered list of fields for type.
        :param Optional[Composite] parent_type: The type this type inherits
            from.
        :raises InvalidSpec: If a field name is duplicated on this type or
            conflicts with a field of any ancestor type.
        """
        self.raw_doc = doc
        self.doc = doc_unwrap(doc)
        self.fields = fields
        self.parent_type = parent_type
        self._raw_examples = OrderedDict()
        self._examples = OrderedDict()
        self._fields_by_name = {}  # Dict[str, Field]
        # Check that no two fields share the same name.
        for field in self.fields:
            if field.name in self._fields_by_name:
                orig_lineno = self._fields_by_name[field.name]._ast_node.lineno
                raise InvalidSpec("Field '%s' already defined on line %s." %
                                  (field.name, orig_lineno),
                                  field._ast_node.lineno)
            self._fields_by_name[field.name] = field
        # Check that the fields for this type do not match any of the fields of
        # its parents.
        cur_type = self.parent_type
        while cur_type:
            for field in self.fields:
                if field.name in cur_type._fields_by_name:
                    lineno = cur_type._fields_by_name[field.name]._ast_node.lineno
                    raise InvalidSpec(
                        "Field '%s' already defined in parent '%s' on line %d."
                        % (field.name, cur_type.name, lineno),
                        field._ast_node.lineno)
            cur_type = cur_type.parent_type
        # Indicate that the attributes of the type have been populated.
        self._is_forward_ref = False
    @property
    def all_fields(self):
        # Subclasses (Struct, Union) define how inherited fields are merged
        # with this type's own fields.
        raise NotImplementedError
    def has_documented_type_or_fields(self, include_inherited_fields=False):
        """Returns whether this type, or any of its fields, are documented.

        Use this when deciding whether to create a block of documentation for
        this type.
        """
        if self.doc:
            return True
        else:
            return self.has_documented_fields(include_inherited_fields)
    def has_documented_fields(self, include_inherited_fields=False):
        """Returns whether at least one field is documented."""
        fields = self.all_fields if include_inherited_fields else self.fields
        for field in fields:
            if field.doc:
                return True
        return False
    def get_all_omitted_callers(self):
        """Returns all unique omitted callers for the object."""
        return {f.omitted_caller for f in self.fields if f.omitted_caller}
    @property
    def name(self):
        return self._name
    def copy(self):
        # Deep copy so mutations on the copy cannot affect this instance.
        return copy.deepcopy(self)
    def prepend_field(self, field):
        # Inserts at index 0 so the field precedes all declared fields.
        self.fields.insert(0, field)
    def get_examples(self, compact=False):
        """
        Returns an OrderedDict mapping labels to Example objects.

        Args:
            compact (bool): If True, union members of void type are converted
                to their compact representation: no ".tag" key or containing
                dict, just the tag as a string.
        """
        # Copy it just in case the caller wants to mutate the object.
        examples = copy.deepcopy(self._examples)
        if not compact:
            return examples
        def make_compact(d):
            # Traverse through dicts looking for ones that have a lone .tag
            # key, which can be converted into the compact form.
            if not isinstance(d, dict):
                return
            for key in d:
                if isinstance(d[key], dict):
                    inner_d = d[key]
                    if len(inner_d) == 1 and '.tag' in inner_d:
                        d[key] = inner_d['.tag']
                    else:
                        make_compact(inner_d)
                if isinstance(d[key], list):
                    # Recurse into list elements (no-op for non-dict items).
                    for item in d[key]:
                        make_compact(item)
        for example in examples.values():
            if (isinstance(example.value, dict) and
                    len(example.value) == 1 and '.tag' in example.value):
                # Handle the case where the top-level of the example can be
                # made compact.
                example.value = example.value['.tag']
            else:
                make_compact(example.value)
        return examples
class Example(object):
    """An example of a struct or union type."""

    def __init__(self, label, text, value, ast_node=None):
        # Defensive type checks: label is text, text is optional text,
        # value is either text or an ordered mapping.
        assert isinstance(label, six.text_type), type(label)
        assert isinstance(text, (six.text_type, type(None))), type(text)
        assert isinstance(value, (six.text_type, OrderedDict)), type(value)
        self.label = label
        if text:
            self.text = doc_unwrap(text)
        else:
            self.text = text
        self.value = value
        self._ast_node = ast_node

    def __repr__(self):
        return 'Example(%r, %r, %r)' % (self.label, self.text, self.value)
class Struct(UserDefined):
    """
    Defines a product type: Composed of other primitive and/or struct types.
    """
    # pylint: disable=attribute-defined-outside-init
    composite_type = 'struct'
    def set_attributes(self, doc, fields, parent_type=None):
        """
        See :meth:`Composite.set_attributes` for parameter definitions.
        """
        if parent_type:
            assert isinstance(parent_type, Struct)
        # Structs that extend this one register themselves here (see the
        # append at the bottom of this method).
        self.subtypes = []
        # These are only set if this struct enumerates subtypes.
        self._enumerated_subtypes = None  # Optional[List[Tuple[str, DataType]]]
        self._is_catch_all = None  # Optional[Bool]
        super(Struct, self).set_attributes(doc, fields, parent_type)
        if self.parent_type:
            self.parent_type.subtypes.append(self)
    def check(self, val):
        raise NotImplementedError
    def check_example(self, ex_field):
        # A struct-valued example field must reference a labeled example of
        # this struct rather than supply an inline value.
        if not isinstance(ex_field.value, AstExampleRef):
            raise InvalidSpec(
                "example must reference label of '%s'" % self.name,
                ex_field.lineno, ex_field.path)
    def check_attr_repr(self, attrs):
        """Validates a dict of route attributes against this struct's fields,
        returning a dict with a validated value for every field and raising
        InvalidSpec for attributes that don't correspond to any field."""
        # Since we mutate it, let's make a copy to avoid mutating the argument.
        attrs = attrs.copy()
        validated_attrs = {}
        for field in self.all_fields:
            attr = field.check_attr_repr(attrs.pop(field.name, None))
            validated_attrs[field.name] = attr
        if attrs:
            # Anything left over was not declared on this struct.
            attr_name, attr_field = attrs.popitem()
            raise InvalidSpec(
                "Route attribute '%s' is not defined in 'stone_cfg.Route'."
                % attr_name, attr_field.lineno, attr_field.path)
        return validated_attrs
    @property
    def all_fields(self):
        """
        Returns an iterator of all fields. Required fields before optional
        fields. Super type fields before type fields.
        """
        return self.all_required_fields + self.all_optional_fields
    def _filter_fields(self, filter_function):
        """
        Utility to iterate through all fields (super types first) of a type.

        :param filter: A function that takes in a Field object. If it returns
            True, the field is part of the generated output. If False, it is
            omitted.
        """
        fields = []
        if self.parent_type:
            # Recurse first so ancestor fields always precede this type's.
            fields.extend(self.parent_type._filter_fields(filter_function))
        fields.extend(filter(filter_function, self.fields))
        return fields
    @property
    def all_required_fields(self):
        """
        Returns an iterator that traverses required fields in all super types
        first, and then for this type.
        """
        def required_check(f):
            # Required = neither nullable nor carrying a default value.
            return not is_nullable_type(f.data_type) and not f.has_default
        return self._filter_fields(required_check)
    @property
    def all_optional_fields(self):
        """
        Returns an iterator that traverses optional fields in all super types
        first, and then for this type.
        """
        def optional_check(f):
            return is_nullable_type(f.data_type) or f.has_default
        return self._filter_fields(optional_check)
    def has_enumerated_subtypes(self):
        """
        Whether this struct enumerates its subtypes.
        """
        return bool(self._enumerated_subtypes)
    def get_enumerated_subtypes(self):
        """
        Returns a list of subtype fields. Each field has a `name` attribute
        which is the tag for the subtype. Each field also has a `data_type`
        attribute that is a `Struct` object representing the subtype.
        """
        assert self._enumerated_subtypes is not None
        return self._enumerated_subtypes
    def is_member_of_enumerated_subtypes_tree(self):
        """
        Whether this struct enumerates subtypes or is a struct that is
        enumerated by its parent type. Because such structs are serialized
        and deserialized differently, use this method to detect these.
        """
        return (self.has_enumerated_subtypes() or
                (self.parent_type and
                 self.parent_type.has_enumerated_subtypes()))
    def is_catch_all(self):
        """
        Indicates whether this struct should be used in the event that none of
        its known enumerated subtypes match a received type tag.

        Use this method only if the struct has enumerated subtypes.

        Returns: bool
        """
        assert self._enumerated_subtypes is not None
        return self._is_catch_all
    def set_enumerated_subtypes(self, subtype_fields, is_catch_all):
        """
        Sets the list of "enumerated subtypes" for this struct. This differs
        from regular subtyping in that each subtype is associated with a tag
        that is used in the serialized format to indicate the subtype. Also,
        this list of subtypes was explicitly defined in an "inner-union" in the
        specification. The list of fields must include all defined subtypes of
        this struct.

        NOTE(kelkabany): For this to work with upcoming forward references, the
        hierarchy of parent types for this struct must have had this method
        called on them already.

        :type subtype_fields: List[UnionField]
        :raises InvalidSpec: On duplicate tags, tags that conflict with
            fields, non-subtype references, or missing subtypes.
        """
        assert self._enumerated_subtypes is None, \
            'Enumerated subtypes already set.'
        assert isinstance(is_catch_all, bool), type(is_catch_all)
        self._is_catch_all = is_catch_all
        self._enumerated_subtypes = []
        if self.parent_type:
            raise InvalidSpec(
                "'%s' enumerates subtypes so it cannot extend another struct."
                % self.name, self._ast_node.lineno, self._ast_node.path)
        # Require that if this struct enumerates subtypes, its parent (and thus
        # the entire hierarchy above this struct) does as well.
        # NOTE(review): this branch is unreachable -- the check directly above
        # already raises for any struct with a parent_type, so this second
        # check can never trigger. Confirm intent before removing.
        if self.parent_type and not self.parent_type.has_enumerated_subtypes():
            raise InvalidSpec(
                "'%s' cannot enumerate subtypes if parent '%s' does not." %
                (self.name, self.parent_type.name),
                self._ast_node.lineno, self._ast_node.path)
        enumerated_subtype_names = set()  # Set[str]
        for subtype_field in subtype_fields:
            path = subtype_field._ast_node.path
            lineno = subtype_field._ast_node.lineno
            # Require that a subtype only has a single type tag.
            if subtype_field.data_type.name in enumerated_subtype_names:
                raise InvalidSpec(
                    "Subtype '%s' can only be specified once." %
                    subtype_field.data_type.name, lineno, path)
            # Require that a subtype has this struct as its parent.
            if subtype_field.data_type.parent_type != self:
                raise InvalidSpec(
                    "'%s' is not a subtype of '%s'." %
                    (subtype_field.data_type.name, self.name), lineno, path)
            # Check for subtype tags that conflict with this struct's
            # non-inherited fields.
            if subtype_field.name in self._fields_by_name:
                # Since the union definition comes first, use its line number
                # as the source of the field's original declaration.
                orig_field = self._fields_by_name[subtype_field.name]
                raise InvalidSpec(
                    "Field '%s' already defined on line %d." %
                    (subtype_field.name, lineno),
                    orig_field._ast_node.lineno,
                    orig_field._ast_node.path)
            # Walk up parent tree hierarchy to ensure no field conflicts.
            # Checks for conflicts with subtype tags and regular fields.
            cur_type = self.parent_type
            while cur_type:
                if subtype_field.name in cur_type._fields_by_name:
                    orig_field = cur_type._fields_by_name[subtype_field.name]
                    raise InvalidSpec(
                        "Field '%s' already defined in parent '%s' (%s:%d)."
                        % (subtype_field.name, cur_type.name,
                           orig_field._ast_node.path, orig_field._ast_node.lineno),
                        lineno, path)
                cur_type = cur_type.parent_type
            # Note the discrepancy between `fields` which contains only the
            # struct fields, and `_fields_by_name` which contains the struct
            # fields and enumerated subtype fields.
            self._fields_by_name[subtype_field.name] = subtype_field
            enumerated_subtype_names.add(subtype_field.data_type.name)
            self._enumerated_subtypes.append(subtype_field)
        assert len(self._enumerated_subtypes) > 0
        # Check that all known subtypes are listed in the enumeration.
        for subtype in self.subtypes:
            if subtype.name not in enumerated_subtype_names:
                raise InvalidSpec(
                    "'%s' does not enumerate all subtypes, missing '%s'" %
                    (self.name, subtype.name),
                    self._ast_node.lineno)
    def get_all_subtypes_with_tags(self):
        """
        Unlike other enumerated-subtypes-related functionality, this method
        returns not just direct subtypes, but all subtypes of this struct. The
        tag of each subtype is the list of tags from which the type descends.

        This method only applies to structs that enumerate subtypes.

        Use this when you need to generate a lookup table for a root struct
        that maps a generated class representing a subtype to the tag it needs
        in the serialized format.

        Returns:
            List[Tuple[List[String], Struct]]
        """
        assert self.has_enumerated_subtypes(), 'Enumerated subtypes not set.'
        subtypes_with_tags = []  # List[Tuple[List[String], Struct]]
        # Breadth-first traversal of the subtype hierarchy.
        fifo = deque([subtype_field.data_type
                      for subtype_field in self.get_enumerated_subtypes()])
        # Traverse down the hierarchy registering subtypes as they're found.
        while fifo:
            data_type = fifo.popleft()
            subtypes_with_tags.append((data_type._get_subtype_tags(), data_type))
            if data_type.has_enumerated_subtypes():
                for subtype_field in data_type.get_enumerated_subtypes():
                    fifo.append(subtype_field.data_type)
        return subtypes_with_tags
    def _get_subtype_tags(self):
        """
        Returns a list of type tags that refer to this type starting from the
        base of the struct hierarchy.

        NOTE: despite the docstring, the return value is a tuple, not a list.
        """
        assert self.is_member_of_enumerated_subtypes_tree(), \
            'Not a part of a subtypes tree.'
        cur = self.parent_type
        cur_dt = self
        tags = []
        # Walk upward, collecting the tag each parent uses for the child we
        # came from; reverse at the end to get root-to-leaf order.
        while cur:
            assert cur.has_enumerated_subtypes()
            for subtype_field in cur.get_enumerated_subtypes():
                if subtype_field.data_type is cur_dt:
                    tags.append(subtype_field.name)
                    break
            else:
                assert False, 'Could not find?!'
            cur_dt = cur
            cur = cur.parent_type
        tags.reverse()
        return tuple(tags)
    def _add_example(self, example):
        """Adds a "raw example" for this type.

        This does basic sanity checking to ensure that the example is valid
        (required fields specified, no unknown fields, correct types, ...).

        The example is not available via :meth:`get_examples` until
        :meth:`_compute_examples` is called.

        Args:
            example (stone.frontend.ast.AstExample): An example of this type.
        """
        if self.has_enumerated_subtypes():
            self._add_example_enumerated_subtypes_helper(example)
        else:
            self._add_example_helper(example)
    def _add_example_enumerated_subtypes_helper(self, example):
        """Validates examples for structs with enumerated subtypes."""
        if len(example.fields) != 1:
            raise InvalidSpec(
                'Example for struct with enumerated subtypes must only '
                'specify one subtype tag.', example.lineno, example.path)
        # Extract the only tag in the example.
        example_field = list(example.fields.values())[0]
        tag = example_field.name
        val = example_field.value
        if not isinstance(val, AstExampleRef):
            raise InvalidSpec(
                "Example of struct with enumerated subtypes must be a "
                "reference to a subtype's example.",
                example_field.lineno, example_field.path)
        # for/else: the else raises only when no enumerated subtype matched.
        for subtype_field in self.get_enumerated_subtypes():
            if subtype_field.name == tag:
                self._raw_examples[example.label] = example
                break
        else:
            raise InvalidSpec(
                "Unknown subtype tag '%s' in example." % tag,
                example_field.lineno, example_field.path)
    def _add_example_helper(self, example):
        """Validates examples for structs without enumerated subtypes."""
        # Check for fields in the example that don't belong.
        for label, example_field in example.fields.items():
            if not any(label == f.name for f in self.all_fields):
                raise InvalidSpec(
                    "Example for '%s' has unknown field '%s'." %
                    (self.name, label),
                    example_field.lineno, example_field.path,
                )
        for field in self.all_fields:
            if field.name in example.fields:
                example_field = example.fields[field.name]
                try:
                    field.data_type.check_example(example_field)
                except InvalidSpec as e:
                    # Prefix the field name for a clearer error message.
                    e.msg = "Bad example for field '{}': {}".format(
                        field.name, e.msg)
                    raise
            elif field.has_default or isinstance(field.data_type, Nullable):
                # These don't need examples.
                pass
            else:
                raise InvalidSpec(
                    "Missing field '%s' in example." % field.name,
                    example.lineno, example.path)
        self._raw_examples[example.label] = example
    def _has_example(self, label):
        """Whether this data type has an example with the given ``label``."""
        return label in self._raw_examples
    def _compute_examples(self):
        """
        Populates the ``_examples`` instance attribute by computing full
        examples for each label in ``_raw_examples``.

        The logic in this method is separate from :meth:`_add_example` because
        this method requires that every type have ``_raw_examples`` assigned
        for resolving example references.
        """
        for label in self._raw_examples:
            self._examples[label] = self._compute_example(label)
    def _compute_example(self, label):
        if self.has_enumerated_subtypes():
            return self._compute_example_enumerated_subtypes(label)
        else:
            return self._compute_example_flat_helper(label)
    def _compute_example_flat_helper(self, label):
        """
        From the "raw example," resolves references to examples of other data
        types to compute the final example.

        Returns an Example object. The `value` attribute contains a
        JSON-serializable representation of the example.
        """
        assert label in self._raw_examples, label
        example = self._raw_examples[label]
        def deref_example_ref(dt, val):
            # Resolve a reference to another type's labeled example.
            dt, _ = unwrap_nullable(dt)
            if not dt._has_example(val.label):
                raise InvalidSpec(
                    "Reference to example for '%s' with label '%s' "
                    "does not exist." % (dt.name, val.label),
                    val.lineno, val.path)
            return dt._compute_example(val.label).value
        # Resolved values accumulate here; the raw example is not mutated.
        ex_val = OrderedDict()
        def get_json_val(dt, val):
            # Recursively convert a raw example value to its JSON-serializable
            # form, dereferencing example and tag references.
            if isinstance(val, AstExampleRef):
                # Embed references to other examples directly.
                return deref_example_ref(dt, val)
            elif isinstance(val, TagRef):
                return val.union_data_type._compute_example(val.tag_name).value
            elif isinstance(val, list):
                dt, _ = unwrap_nullable(dt)
                return [get_json_val(dt.data_type, v) for v in val]
            elif isinstance(val, dict):
                dt, _ = unwrap_nullable(dt)
                if is_alias(dt):
                    return val
                return {k: get_json_val(dt.value_data_type, v) for (k, v) in val.items()}
            else:
                return val
        for field in self.all_fields:
            if field.name in example.fields:
                example_field = example.fields[field.name]
                if example_field.value is None:
                    # Serialized format doesn't include fields with null.
                    pass
                else:
                    ex_val[field.name] = get_json_val(
                        field.data_type, example_field.value)
            elif field.has_default:
                # Fields absent from the example fall back to their default.
                ex_val[field.name] = get_json_val(
                    field.data_type, field.default)
        return Example(example.label, example.text, ex_val, ast_node=example)
    def _compute_example_enumerated_subtypes(self, label):
        """
        Analogous to :meth:`_compute_example_flat_helper` but for structs with
        enumerated subtypes.
        """
        assert label in self._raw_examples, label
        example = self._raw_examples[label]
        example_field = list(example.fields.values())[0]
        # Find the subtype the example's single tag refers to. The tag was
        # validated in _add_example_enumerated_subtypes_helper, so a match
        # is expected.
        for subtype_field in self.get_enumerated_subtypes():
            if subtype_field.name == example_field.name:
                data_type = subtype_field.data_type
                break
        ref = example_field.value
        if not data_type._has_example(ref.label):
            raise InvalidSpec(
                "Reference to example for '%s' with label '%s' does not "
                "exist." % (data_type.name, ref.label),
                ref.lineno, ref.path)
        # Prepend '.tag' so it appears first in the ordered output.
        ordered_value = OrderedDict([('.tag', example_field.name)])
        flat_example = data_type._compute_example_flat_helper(ref.label)
        ordered_value.update(flat_example.value)
        flat_example.value = ordered_value
        return flat_example
    def __repr__(self):
        return 'Struct(%r, %r)' % (self.name, self.fields)
class Union(UserDefined):
    """Defines a tagged union. Fields are variants."""
    # pylint: disable=attribute-defined-outside-init
    composite_type = 'union'
    def __init__(self, name, namespace, ast_node, closed):
        # ``closed`` is stored as-is; its semantics are enforced elsewhere.
        super(Union, self).__init__(name, namespace, ast_node)
        self.closed = closed
    # TODO: Why is this a different signature than the parent? Is this
    # intentional?
    def set_attributes(self, doc, fields,  # pylint: disable=arguments-differ
                       parent_type=None, catch_all_field=None):
        """
        :param UnionField catch_all_field: The field designated as the
            catch-all. This field should be a member of the list of fields.

        See :meth:`Composite.set_attributes` for parameter definitions.
        """
        if parent_type:
            assert isinstance(parent_type, Union)
        super(Union, self).set_attributes(doc, fields, parent_type)
        self.catch_all_field = catch_all_field
        # Redundant: super().set_attributes() already assigned parent_type.
        self.parent_type = parent_type
    def check(self, val):
        # Validates that `val` is a TagRef to a void variant of this union.
        assert isinstance(val, TagRef)
        # for/else: the else raises only when no field matches the tag.
        for field in self.all_fields:
            if val.tag_name == field.name:
                if not is_void_type(field.data_type):
                    raise ValueError(
                        "invalid reference to non-void option '%s'" %
                        val.tag_name)
                break
        else:
            raise ValueError(
                "invalid reference to unknown tag '%s'" % val.tag_name)
    def check_example(self, ex_field):
        # A union-valued example field must reference a labeled example.
        if not isinstance(ex_field.value, AstExampleRef):
            raise InvalidSpec(
                "example must reference label of '%s'" % self.name,
                ex_field.lineno, ex_field.path)
    def check_attr_repr(self, attr_field):
        """Validates that `attr_field` is a tag reference into this union and
        returns the corresponding TagRef."""
        if not isinstance(attr_field.value, AstTagRef):
            raise InvalidSpec(
                'Expected union tag as value.',
                attr_field.lineno, attr_field.path)
        tag_ref = TagRef(self, attr_field.value.tag)
        try:
            self.check(tag_ref)
        except ValueError as e:
            # Re-raise with location information attached.
            raise InvalidSpec(e.args[0], attr_field.lineno, attr_field.path)
        return tag_ref
    @property
    def all_fields(self):
        """
        Returns a list of all fields. Subtype fields come before this type's
        fields.
        """
        fields = []
        if self.parent_type:
            fields.extend(self.parent_type.all_fields)
        fields.extend([f for f in self.fields])
        return fields
    def _add_example(self, example):
        """Adds a "raw example" for this type.

        This does basic sanity checking to ensure that the example is valid
        (required fields specified, no unknown fields, correct types, ...).

        The example is not available via :meth:`get_examples` until
        :meth:`_compute_examples` is called.

        Args:
            example (stone.frontend.ast.AstExample): An example of this
                type.
        """
        if len(example.fields) != 1:
            raise InvalidSpec(
                'Example for union must specify exactly one tag.',
                example.lineno, example.path)
        # Extract the only tag in the example.
        example_field = list(example.fields.values())[0]
        tag = example_field.name
        # Find the union member that corresponds to the tag.
        for field in self.all_fields:
            if tag == field.name:
                break
        else:
            # Error: Tag doesn't match any union member.
            raise InvalidSpec(
                "Unknown tag '%s' in example." % tag,
                example.lineno, example.path
            )
        # TODO: are we always guaranteed at least one field?
        # pylint: disable=undefined-loop-variable
        try:
            field.data_type.check_example(example_field)
        except InvalidSpec as e:
            e.msg = "Bad example for field '{}': {}".format(
                field.name, e.msg)
            raise
        self._raw_examples[example.label] = example
    def _has_example(self, label):
        """Whether this data type has an example with the given ``label``."""
        if label in self._raw_examples:
            return True
        else:
            # A user-defined or void member's name also serves as an example
            # label; for/else returns False when no member name matches.
            for field in self.all_fields:
                dt, _ = unwrap_nullable(field.data_type)
                if not is_user_defined_type(dt) and not is_void_type(dt):
                    continue
                if label == field.name:
                    return True
            else:
                return False
    def _compute_examples(self):
        """
        Populates the ``_examples`` instance attribute by computing full
        examples for each label in ``_raw_examples``.

        The logic in this method is separate from :meth:`_add_example` because
        this method requires that every type have ``_raw_examples`` assigned
        for resolving example references.
        """
        for label in self._raw_examples:
            self._examples[label] = self._compute_example(label)
        # Add examples for each void union member.
        for field in self.all_fields:
            dt, _ = unwrap_nullable(field.data_type)
            if is_void_type(dt):
                self._examples[field.name] = \
                    Example(
                        field.name, None, OrderedDict([('.tag', field.name)]))
    def _compute_example(self, label):
        """
        From the "raw example," resolves references to examples of other data
        types to compute the final example.

        Returns an Example object. The `value` attribute contains a
        JSON-serializable representation of the example.
        """
        if label in self._raw_examples:
            example = self._raw_examples[label]
            def deref_example_ref(dt, val):
                # Resolve a reference to another type's labeled example.
                dt, _ = unwrap_nullable(dt)
                if not dt._has_example(val.label):
                    raise InvalidSpec(
                        "Reference to example for '%s' with label '%s' "
                        "does not exist." % (dt.name, val.label),
                        val.lineno, val.path)
                return dt._compute_example(val.label).value
            def get_json_val(dt, val):
                if isinstance(val, AstExampleRef):
                    # Embed references to other examples directly.
                    return deref_example_ref(dt, val)
                elif isinstance(val, list):
                    return [get_json_val(dt.data_type, v) for v in val]
                else:
                    return val
            example_field = list(example.fields.values())[0]
            # Resolved values accumulate here; the '.tag' key comes first.
            ex_val = OrderedDict([('.tag', example_field.name)])
            for field in self.all_fields:
                if field.name == example_field.name:
                    break
            # TODO: are we always guaranteed at least one field?
            # pylint: disable=undefined-loop-variable
            data_type, _ = unwrap_nullable(field.data_type)
            inner_ex_val = get_json_val(data_type, example_field.value)
            if (isinstance(data_type, Struct) and
                    not data_type.has_enumerated_subtypes()):
                # Struct values are flattened alongside the '.tag' key.
                ex_val.update(inner_ex_val)
            else:
                if inner_ex_val is not None:
                    ex_val[field.name] = inner_ex_val
            return Example(example.label, example.text, ex_val, ast_node=example)
        else:
            # Try to fallback to a union member with tag matching the label
            # with a data type that is composite or void.
            for field in self.all_fields:
                if label == field.name:
                    break
            else:
                raise AssertionError('No example for label %r' % label)
            # TODO: are we always guaranteed at least one field?
            # pylint: disable=undefined-loop-variable
            assert is_void_type(field.data_type)
            return Example(
                field.name, field.doc, OrderedDict([('.tag', field.name)]))
    def unique_field_data_types(self):
        """
        Checks if all variants have different data types.

        If so, the selected variant can be determined just by the data type of
        the value without needing a field name / tag. In some languages, this
        lets us make a shortcut
        """
        data_type_names = set()
        # for/else: the else runs once the loop finishes without returning
        # False (there is no break, so it always runs on completion).
        for field in self.fields:
            if not is_void_type(field.data_type):
                if field.data_type.name in data_type_names:
                    return False
                else:
                    data_type_names.add(field.data_type.name)
        else:
            return True
    def __repr__(self):
        return 'Union(%r, %r)' % (self.name, self.fields)
class TagRef(object):
    """
    Used when an ID in Stone refers to a tag of a union.

    TODO(kelkabany): Support tag values.
    """

    def __init__(self, union_data_type, tag_name):
        # The union that declares the tag, and the tag's name within it.
        self.union_data_type = union_data_type
        self.tag_name = tag_name

    def __repr__(self):
        return 'TagRef({!r}, {!r})'.format(self.union_data_type, self.tag_name)
class Annotation(object):
    """
    Used when a field is annotated with a pre-defined Stone action.
    """

    def __init__(self, name, namespace, ast_node):
        # Annotation name, owning namespace, and raw parser node.
        self.name = name
        self.namespace = namespace
        self._ast_node = ast_node
class Deprecated(Annotation):
    """
    Used when a field is annotated for deprecation.
    """

    def __repr__(self):
        return 'Deprecated({!r}, {!r})'.format(self.name, self.namespace)
class Omitted(Annotation):
    """
    Used when a field is annotated for omission.
    """

    def __init__(self, name, namespace, ast_node, omitted_caller):
        """Same as Annotation, plus which caller the field is omitted for."""
        super(Omitted, self).__init__(name, namespace, ast_node)
        self.omitted_caller = omitted_caller

    def __repr__(self):
        return 'Omitted({!r}, {!r}, {!r})'.format(
            self.name, self.namespace, self.omitted_caller)
class Preview(Annotation):
    """
    Used when a field is annotated for previewing.
    """

    def __repr__(self):
        return 'Preview({!r}, {!r})'.format(self.name, self.namespace)
class Redacted(Annotation):
    """
    Used when a field is annotated for redaction.
    """

    def __init__(self, name, namespace, ast_node, regex=None):
        """Same as Annotation; ``regex`` is stored for use by redactors."""
        super(Redacted, self).__init__(name, namespace, ast_node)
        self.regex = regex
class RedactedBlot(Redacted):
    """
    Used when a field is annotated to be blotted.
    """

    def __repr__(self):
        return 'RedactedBlot({!r}, {!r}, {!r})'.format(
            self.name, self.namespace, self.regex)
class RedactedHash(Redacted):
    """
    Used when a field is annotated to be hashed.
    """

    def __repr__(self):
        return 'RedactedHash({!r}, {!r}, {!r})'.format(
            self.name, self.namespace, self.regex)
class Alias(Composite):
    """
    NOTE: The categorization of aliases as a composite type is arbitrary.
    It fit here better than as a primitive or user-defined type.
    """

    def __init__(self, name, namespace, ast_node):
        """
        When this is instantiated, the type is treated as a forward reference.
        Only when :meth:`set_attributes` is called is the type considered to
        be fully defined.

        :param str name: Name of type.
        :param stone.ir.ApiNamespace namespace: The namespace this type is
            defined in.
        :param ast_node: Raw type definition from the parser.
        :type ast_node: stone.frontend.ast.AstTypeDef
        """
        super(Alias, self).__init__()
        self._name = name
        self.namespace = namespace
        self._ast_node = ast_node
        # Populated by :meth:`set_attributes`
        self.raw_doc = None
        self.doc = None
        self.data_type = None
        self.redactor = None

    def set_annotations(self, annotations):
        """Attaches annotations; aliases accept only a single Redacted."""
        for ann in annotations:
            if not isinstance(ann, Redacted):
                raise InvalidSpec("Aliases only support 'Redacted', not %r" %
                                  str(ann), self._ast_node.lineno)
            # Make sure we don't set multiple conflicting annotations on one alias
            if self.redactor:
                raise InvalidSpec("Redactor already set as %r" %
                                  str(self.redactor), self._ast_node.lineno)
            self.redactor = ann

    def set_attributes(self, doc, data_type):
        """
        :param Optional[str] doc: Documentation string of alias.
        :param data_type: The source data type referenced by the alias.
        """
        self.raw_doc = doc
        self.doc = doc_unwrap(doc)
        self.data_type = data_type
        # Make sure we don't have a cyclic reference.
        # Since attributes are set one data type at a time, only the last data
        # type to be populated in a cycle will be able to detect the cycle.
        # Before that, the cycle will be broken by an alias with no populated
        # source.
        target = data_type
        while is_alias(target):
            target = target.data_type
            if target == self:
                raise InvalidSpec(
                    "Alias '%s' is part of a cycle." % self.name,
                    self._ast_node.lineno, self._ast_node.path)

    @property
    def name(self):
        return self._name

    # The remaining methods simply delegate to the aliased data type.

    def check(self, val):
        return self.data_type.check(val)

    def check_example(self, ex_field):
        # TODO: Assert that this isn't a user-defined type.
        return self.data_type.check_example(ex_field)

    def _has_example(self, label):
        # TODO: Assert that this is a user-defined type
        return self.data_type._has_example(label)

    def _compute_example(self, label):
        return self.data_type._compute_example(label)

    def check_attr_repr(self, attr_field):
        return self.data_type.check_attr_repr(attr_field)

    def __repr__(self):
        return 'Alias({!r}, {!r})'.format(self.name, self.data_type)
def unwrap_nullable(data_type):
    """
    Convenience method to unwrap Nullable from around a DataType.

    Args:
        data_type (DataType): The target to unwrap.

    Return:
        Tuple[DataType, bool]: The underlying data type and a bool indicating
            whether the input type was nullable.
    """
    if not is_nullable_type(data_type):
        return data_type, False
    return data_type.data_type, True
def unwrap_aliases(data_type):
    """
    Convenience method to unwrap all Alias(es) from around a DataType.

    Args:
        data_type (DataType): The target to unwrap.

    Return:
        Tuple[DataType, bool]: The underlying data type and a bool indicating
            whether the input type had at least one alias layer.
    """
    saw_alias = is_alias(data_type)
    while is_alias(data_type):
        data_type = data_type.data_type
    return data_type, saw_alias
def unwrap(data_type):
    """
    Convenience method to unwrap all Aliases and Nullables from around a
    DataType. This checks for nullable wrapping aliases, as well as aliases
    wrapping nullables.

    Args:
        data_type (DataType): The target to unwrap.

    Return:
        Tuple[DataType, bool, bool]: The underlying data type; a bool that is
            set if a nullable was present; a bool that is set if an alias was
            present.
    """
    saw_nullable = False
    saw_alias = False
    # Peel wrapper layers one at a time, recording which kinds were seen.
    while True:
        if is_nullable_type(data_type):
            saw_nullable = True
        elif is_alias(data_type):
            saw_alias = True
        else:
            break
        data_type = data_type.data_type
    return data_type, saw_nullable, saw_alias
def is_alias(data_type):
    """True if *data_type* is an Alias."""
    return isinstance(data_type, Alias)
def is_bytes_type(data_type):
    """True if *data_type* is a Bytes type."""
    return isinstance(data_type, Bytes)
def is_boolean_type(data_type):
    """True if *data_type* is a Boolean type."""
    return isinstance(data_type, Boolean)
def is_composite_type(data_type):
    """True if *data_type* is a Composite type."""
    return isinstance(data_type, Composite)
def is_float_type(data_type):
    """True if *data_type* is either of the float types (Float32/Float64)."""
    return isinstance(data_type, (Float32, Float64))
def is_integer_type(data_type):
    """True if *data_type* is any of the integer types (signed or unsigned, 32/64-bit)."""
    return isinstance(data_type, (UInt32, UInt64, Int32, Int64))
def is_list_type(data_type):
    """True if *data_type* is a List type."""
    return isinstance(data_type, List)
def is_map_type(data_type):
    """True if *data_type* is a Map type."""
    return isinstance(data_type, Map)
def is_nullable_type(data_type):
    """True if *data_type* is a Nullable wrapper."""
    return isinstance(data_type, Nullable)
def is_numeric_type(data_type):
    """True if *data_type* is an integer or float type."""
    return is_integer_type(data_type) or is_float_type(data_type)
def is_primitive_type(data_type):
    """True if *data_type* is a Primitive type."""
    return isinstance(data_type, Primitive)
def is_string_type(data_type):
    """True if *data_type* is a String type."""
    return isinstance(data_type, String)
def is_struct_type(data_type):
    """True if *data_type* is a Struct type."""
    return isinstance(data_type, Struct)
def is_tag_ref(val):
    """True if *val* is a TagRef."""
    return isinstance(val, TagRef)
def is_timestamp_type(data_type):
    """True if *data_type* is a Timestamp type."""
    return isinstance(data_type, Timestamp)
def is_union_type(data_type):
    """True if *data_type* is a Union type."""
    return isinstance(data_type, Union)
def is_user_defined_type(data_type):
    """True if *data_type* is a UserDefined type (struct, union, ...)."""
    return isinstance(data_type, UserDefined)
def is_void_type(data_type):
    """True if *data_type* is the Void type."""
    return isinstance(data_type, Void)
| {
"content_hash": "deb93e29080989bd64b8507d67cd307e",
"timestamp": "",
"source": "github",
"line_count": 1831,
"max_line_length": 98,
"avg_line_length": 36.3604587657018,
"alnum_prop": 0.5759132420091324,
"repo_name": "posita/stone",
"id": "aeae3dd24dcfdb8c0a5098541fb89edf25b475eb",
"size": "66576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stone/ir/data_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "23077"
},
{
"name": "Python",
"bytes": "922270"
},
{
"name": "Shell",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "17212"
}
],
"symlink_target": ""
} |
import pytest
from mock import patch, call, Mock, MagicMock
import os
from blt.tools import bundle
# -- Set Fixtures -------------------------------------------------------------
@pytest.fixture
def cmds():
    """Provide a BundleCommands instance wired to a throwaway test config."""
    bundle_settings = {
        'BUNDLE_ROOT': '/tmp/bundles/',
        'LOCAL': 'sqlite://///Users/laphroaig/.virtualenvs/payinstr/db.sqlite3',
        'STAGING': 'sqlite://///Users/lagavulin/.virtualenvs/payinstr/db.sqlite3',
    }
    django_settings = {'DJANGO_ROOT': 'djangoproj'}
    return bundle.BundleCommands({'bundle': bundle_settings,
                                  'django': django_settings})
# -- Setup/Teardown -----------------------------------------------------------
# Set up module-wide mocks: replace the shell helpers on the module under test
# so no real commands run during the tests. NOTE(review): cd gets a MagicMock
# (supports dunder protocols) — presumably because bundle uses it as a context
# manager; confirm against blt.tools.bundle.
bundle.local = Mock()
bundle.cd = MagicMock()
def teardown_function(function):
    """Called by pytest after every test case; clears recorded mock calls."""
    for fake in (bundle.local, bundle.cd):
        fake.reset_mock()
# -- Test Cases! --------------------------------------------------------------
def test_dump(cmds):
    """dump() should cd into the Django project, create the bundle's db
    directory, point DATABASE_URL at the LOCAL database, and run the
    manage.py bundle dump command with the fixture path."""
    # makedirs is patched here (not in module setup) so the filesystem is
    # never touched by this test.
    bundle.makedirs = Mock()
    cmds.dump('scotch-scotch-scotch', 'LOCAL')
    bundle.cd.assert_called_once_with('djangoproj')
    bundle.makedirs.assert_called_once_with('/tmp/bundles/scotch-scotch-scotch/db')
    # DATABASE_URL is expected to be exported as a side effect of dump().
    assert os.environ.get('DATABASE_URL') == 'sqlite://///Users/laphroaig/.virtualenvs/payinstr/db.sqlite3'
    bundle.local.assert_called_once_with('python manage.py bundle dump scotch-scotch-scotch /tmp/bundles/scotch-scotch-scotch/db/fixture.xml')
def test_load(cmds):
    """load() should cd into the Django project, point DATABASE_URL at the
    STAGING database, and run the manage.py bundle load command with the
    fixture path."""
    cmds.load('scotch-scotch-scotch', 'STAGING')
    bundle.cd.assert_called_once_with('djangoproj')
    assert os.environ.get('DATABASE_URL') == 'sqlite://///Users/lagavulin/.virtualenvs/payinstr/db.sqlite3'
    bundle.local.assert_called_once_with('python manage.py bundle load /tmp/bundles/scotch-scotch-scotch/db/fixture.xml')
def test_delete(cmds):
    """delete() should cd into the Django project, point DATABASE_URL at the
    STAGING database, and run the manage.py bundle delete command."""
    cmds.delete('scotch-scotch-scotch', 'STAGING')
    bundle.cd.assert_called_once_with('djangoproj')
    assert os.environ.get('DATABASE_URL') == 'sqlite://///Users/lagavulin/.virtualenvs/payinstr/db.sqlite3'
    bundle.local.assert_called_once_with('python manage.py bundle delete scotch-scotch-scotch')
| {
"content_hash": "4f3a074a55188be2be09f4e1299c15d7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 142,
"avg_line_length": 36.87931034482759,
"alnum_prop": 0.6250584385226742,
"repo_name": "dencold/blt",
"id": "945d00ba196249d071080a0ca10711c22a6b8dcd",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blt/test/test_bundle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76067"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import itertools
import logging
import thread
import urlparse
from django.conf import settings # noqa
import glanceclient as glance_client
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
def glanceclient(request):
    """Build a glance v1 client authenticated with the request's token.

    The endpoint is derived from the service catalog's 'image' URL; only the
    scheme and host/port are kept (the API version is passed separately to
    the client constructor).
    """
    o = urlparse.urlparse(base.url_for(request, 'image'))
    url = "://".join((o.scheme, o.netloc))
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    # Lazy %-style logger args: the message is only formatted when DEBUG
    # logging is actually enabled (the original formatted it eagerly).
    LOG.debug('glanceclient connection created using token "%s" and url "%s"',
              request.user.token.id, url)
    return glance_client.Client('1', url, token=request.user.token.id,
                                insecure=insecure)
def image_delete(request, image_id):
    """Delete the image with the supplied identifier."""
    return glanceclient(request).images.delete(image_id)
def image_get(request, image_id):
    """Return an Image object populated with metadata for the image
    with the supplied identifier.
    """
    return glanceclient(request).images.get(image_id)
def image_list_detailed(request, marker=None, filters=None, paginate=False):
    """List images with full metadata, optionally one page at a time.

    Returns a tuple of (images, has_more_data) where has_more_data is only
    ever True when paginate is set and another page exists.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)

    # When paginating, request one extra record so we can tell whether a
    # further page exists without a second round trip.
    request_size = page_size + 1 if paginate else limit

    kwargs = {'filters': filters or {}}
    if marker:
        kwargs['marker'] = marker

    images_iter = glanceclient(request).images.list(page_size=request_size,
                                                    limit=limit,
                                                    **kwargs)
    if not paginate:
        return list(images_iter), False

    images = list(itertools.islice(images_iter, request_size))
    has_more_data = len(images) > page_size
    if has_more_data:
        # Drop the sentinel extra record before handing the page back.
        images.pop(-1)
    return images, has_more_data
def image_update(request, image_id, **kwargs):
    """Update the image's metadata with the supplied keyword arguments."""
    return glanceclient(request).images.update(image_id, **kwargs)
def image_create(request, **kwargs):
    """Create an image; if a truthy 'copy_from' is given, start a background
    update that imports the image data from that location."""
    # Only pop 'copy_from' when it is truthy — a falsy value is deliberately
    # left in kwargs and passed straight through to create().
    copy_from = kwargs.pop('copy_from') if kwargs.get('copy_from') else None
    image = glanceclient(request).images.create(**kwargs)
    if copy_from:
        # Kick off the server-side copy without blocking this request.
        thread.start_new_thread(image_update,
                                (request, image.id),
                                {'copy_from': copy_from})
    return image
| {
"content_hash": "6a75df8b2c47060736622c3412531d42",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 28.261904761904763,
"alnum_prop": 0.6128896377422073,
"repo_name": "deepakselvaraj/federated-horizon",
"id": "ca9db82d0095deb5b69840ba141426b9aac72240",
"size": "3183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import MixinABC, _convert_request, _format_url_section
T = TypeVar('T')
# Shape of the optional ``cls`` callback each operation accepts: it receives
# the raw pipeline response, the deserialized body, and a dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Disable msrest client-side validation for the requests built in this module.
_SERIALIZER.client_side_validation = False
def build_update_request(
    resource_group_name: str,
    automation_account_name: str,
    subscription_id: str,
    *,
    json: Optional[_models.AutomationAccountUpdateParameters] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that updates an automation account.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults; the
    statement order matters.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller may override the API version via kwarg or query parameter.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
        "automationAccountName": _SERIALIZER.url("automation_account_name", automation_account_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_create_or_update_request(
    resource_group_name: str,
    automation_account_name: str,
    subscription_id: str,
    *,
    json: Optional[_models.AutomationAccountCreateOrUpdateParameters] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates an automation
    account.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
        "automationAccountName": _SERIALIZER.url("automation_account_name", automation_account_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    automation_account_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for an automation account.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
        "automationAccountName": _SERIALIZER.url("automation_account_name", automation_account_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    automation_account_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single automation account.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
        "automationAccountName": _SERIALIZER.url("automation_account_name", automation_account_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists automation accounts in one
    resource group.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all automation accounts in a
    subscription.

    NOTE: the ``pop`` calls below deliberately mutate the local header and
    parameter dicts so caller-supplied values win over the defaults.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
    accept = _headers.pop('Accept', "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Automation/automationAccounts")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
class AutomationAccountOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.automation.AutomationClient`'s
        :attr:`automation_account` attribute.
    """

    # Expose the generated models module so callers can reach it via the
    # operations group.
    models = _models

    def __init__(self, *args, **kwargs):
        # Wiring (client, config, serializer, deserializer) is injected by the
        # generated service client; accept positional or keyword forms.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        automation_account_name: str,
        parameters: _models.AutomationAccountUpdateParameters,
        **kwargs: Any
    ) -> _models.AutomationAccount:
        """Update an automation account.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param parameters: Parameters supplied to the update automation account.
        :type parameters: ~azure.mgmt.automation.models.AutomationAccountUpdateParameters
        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AutomationAccount, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.AutomationAccount
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Callers may extend/override the status-code-to-exception mapping via
        # the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AutomationAccount]

        _json = self._serialize.body(parameters, 'AutomationAccountUpdateParameters')

        request = build_update_request(
            resource_group_name=resource_group_name,
            automation_account_name=automation_account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AutomationAccount', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}"}  # type: ignore


    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        automation_account_name: str,
        parameters: _models.AutomationAccountCreateOrUpdateParameters,
        **kwargs: Any
    ) -> _models.AutomationAccount:
        """Create or update automation account.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param parameters: Parameters supplied to the create or update automation account.
        :type parameters: ~azure.mgmt.automation.models.AutomationAccountCreateOrUpdateParameters
        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AutomationAccount, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.AutomationAccount
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AutomationAccount]

        _json = self._serialize.body(parameters, 'AutomationAccountCreateOrUpdateParameters')

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            automation_account_name=automation_account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # 200 = updated existing account, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('AutomationAccount', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('AutomationAccount', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}"}  # type: ignore


    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        automation_account_name: str,
        **kwargs: Any
    ) -> None:
        """Delete an automation account.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[None]

        request = build_delete_request(
            resource_group_name=resource_group_name,
            automation_account_name=automation_account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}"}  # type: ignore


    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        automation_account_name: str,
        **kwargs: Any
    ) -> _models.AutomationAccount:
        """Get information about an Automation Account.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AutomationAccount, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.AutomationAccount
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AutomationAccount]

        request = build_get_request(
            resource_group_name=resource_group_name,
            automation_account_name=automation_account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AutomationAccount', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}"}  # type: ignore


    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable[_models.AutomationAccountListResult]:
        """Retrieve a list of accounts within a given resource group.

        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AutomationAccountListResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.automation.models.AutomationAccountListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AutomationAccountListResult]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        # Builds the first-page request (no next_link) or a follow-up request
        # for the continuation URL returned by the service.
        def prepare_request(next_link=None):
            if not next_link:

                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:

                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        # Pulls (next_link, items) out of one deserialized page.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("AutomationAccountListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts"}  # type: ignore

    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable[_models.AutomationAccountListResult]:
        """Lists the Automation Accounts within an Azure subscription.

        Retrieve a list of accounts within a given subscription.

        :keyword api_version: Api Version. Default value is "2021-06-22". Note that overriding this
         default value may result in unsupported behavior.
        :paramtype api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AutomationAccountListResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.automation.models.AutomationAccountListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-06-22"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.AutomationAccountListResult]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            if not next_link:

                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:

                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize("AutomationAccountListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Automation/automationAccounts"}  # type: ignore
| {
"content_hash": "2da1a471eb46aa81c0ff18857c2bfde2",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 214,
"avg_line_length": 42.80440771349862,
"alnum_prop": 0.6401403011970652,
"repo_name": "Azure/azure-sdk-for-python",
"id": "777057c256cb4c1a72ec385fd9f8cb330a9ddfe7",
"size": "31576",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/automation/azure-mgmt-automation/azure/mgmt/automation/operations/_automation_account_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import division
from mapproxy.compat.image import Image, transform_uses_center
from mapproxy.image import ImageSource, image_filter
from mapproxy.srs import make_lin_transf, bbox_equals
class ImageTransformer(object):
    """
    Transform images between different bbox and spatial reference systems.

    :note: The transformation doesn't make a real transformation for each pixel,
           but a mesh transformation (see `PIL Image.transform`_).
           It will divide the target image into rectangles (a mesh). The
           source coordinates for each rectangle vertex will be calculated.
           The quadrilateral will then be transformed with the source coordinates
           into the destination quad (affine).

           The number of quads is calculated dynamically to keep the deviation in
           the image transformation below one pixel.

           .. _PIL Image.transform:
              http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform

           ::

             src quad                   dst quad
             .----.   <- coord-         .----.
            /    /    transformation    |    |
           /    /                       |    |
          .----.    img-transformation  .----.
                         ->
          large src image               large dst image
    """

    def __init__(self, src_srs, dst_srs, max_px_err=1):
        """
        :param src_srs: the srs of the source image
        :param dst_srs: the srs of the target image
        :param max_px_err: maximum allowed deviation (in pixels) of the mesh
            transformation before a quad is subdivided further
        """
        self.src_srs = src_srs
        self.dst_srs = dst_srs
        # dst_bbox/dst_size are unused attributes kept for API compatibility
        self.dst_bbox = self.dst_size = None
        self.max_px_err = max_px_err

    def transform(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
        """
        Transforms the `src_img` between the source and destination SRS
        of this ``ImageTransformer`` instance.

        When the ``src_srs`` and ``dst_srs`` are equal the image will be cropped
        and not transformed. If the `src_bbox` and `dst_bbox` are equal,
        the `src_img` itself will be returned.

        :param src_img: the source image for the transformation
        :param src_bbox: the bbox of the src_img
        :param dst_size: the size of the result image (in pixel)
        :type dst_size: ``(int(width), int(height))``
        :param dst_bbox: the bbox of the result image
        :return: the transformed image
        :rtype: `ImageSource`
        """
        if self._no_transformation_needed(src_img.size, src_bbox, dst_size, dst_bbox):
            return src_img

        if self.src_srs == self.dst_srs:
            # same SRS: a crop/scale (extent transformation) is sufficient
            result = self._transform_simple(src_img, src_bbox, dst_size, dst_bbox,
                                            image_opts)
        else:
            result = self._transform(src_img, src_bbox, dst_size, dst_bbox, image_opts)

        # propagate cacheability of the source to the transformed result
        result.cacheable = src_img.cacheable
        return result

    def _transform_simple(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
        """
        Do a simple crop/extent transformation.
        """
        src_quad = (0, 0, src_img.size[0], src_img.size[1])
        # maps world coordinates (src_bbox) to source pixel coordinates
        to_src_px = make_lin_transf(src_bbox, src_quad)
        minx, miny = to_src_px((dst_bbox[0], dst_bbox[3]))
        maxx, maxy = to_src_px((dst_bbox[2], dst_bbox[1]))

        # compare source and destination resolution; if they match within a
        # tenth of a pixel we can crop without resampling
        src_res = ((src_bbox[0]-src_bbox[2])/src_img.size[0],
                   (src_bbox[1]-src_bbox[3])/src_img.size[1])
        dst_res = ((dst_bbox[0]-dst_bbox[2])/dst_size[0],
                   (dst_bbox[1]-dst_bbox[3])/dst_size[1])

        tenth_px_res = (abs(dst_res[0]/(dst_size[0]*10)),
                        abs(dst_res[1]/(dst_size[1]*10)))
        if (abs(src_res[0]-dst_res[0]) < tenth_px_res[0] and
                abs(src_res[1]-dst_res[1]) < tenth_px_res[1]):
            # rounding might result in subpixel inaccuracy
            # this exact resolution match should only happen in clients with
            # fixed resolutions like OpenLayers
            minx = int(round(minx))
            miny = int(round(miny))
            result = src_img.as_image().crop((minx, miny,
                                              minx+dst_size[0], miny+dst_size[1]))
        else:
            # resolutions differ: let PIL resample via an EXTENT transform
            img = img_for_resampling(src_img.as_image(), image_opts.resampling)
            result = img.transform(dst_size, Image.EXTENT,
                                   (minx, miny, maxx, maxy),
                                   image_filter[image_opts.resampling])
        return ImageSource(result, size=dst_size, image_opts=image_opts)

    def _transform(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
        """
        Do a 'real' transformation with a transformed mesh (see above).
        """
        meshes = transform_meshes(
            src_size=src_img.size,
            src_bbox=src_bbox,
            src_srs=self.src_srs,
            dst_size=dst_size,
            dst_bbox=dst_bbox,
            dst_srs=self.dst_srs,
            max_px_err=self.max_px_err,
        )

        img = img_for_resampling(src_img.as_image(), image_opts.resampling)
        result = img.transform(dst_size, Image.MESH, meshes,
                               image_filter[image_opts.resampling])

        if False:
            # draw mesh for debugging
            from PIL import ImageDraw
            draw = ImageDraw.Draw(result)
            for g, _ in meshes:
                draw.rectangle(g, fill=None, outline=(255, 0, 0))

        return ImageSource(result, size=dst_size, image_opts=image_opts)

    def _no_transformation_needed(self, src_size, src_bbox, dst_size, dst_bbox):
        """
        Return True when size and bbox match closely enough (within a tenth
        of a pixel) that the source image can be reused unchanged.

        >>> src_bbox = (-2504688.5428486541, 1252344.271424327,
        ...             -1252344.271424327, 2504688.5428486541)
        >>> dst_bbox = (-2504688.5431999983, 1252344.2704,
        ...             -1252344.2719999983, 2504688.5416000001)
        >>> from mapproxy.srs import SRS
        >>> t = ImageTransformer(SRS(900913), SRS(900913))
        >>> t._no_transformation_needed((256, 256), src_bbox, (256, 256), dst_bbox)
        True
        """
        xres = (dst_bbox[2]-dst_bbox[0])/dst_size[0]
        yres = (dst_bbox[3]-dst_bbox[1])/dst_size[1]
        return (src_size == dst_size and
                self.src_srs == self.dst_srs and
                bbox_equals(src_bbox, dst_bbox, xres/10, yres/10))
def transform_meshes(src_size, src_bbox, src_srs, dst_size, dst_bbox, dst_srs, max_px_err=1):
    """
    transform_meshes creates a list of QUAD transformation parameters for PIL's
    MESH image transformation.

    Each QUAD is a rectangle in the destination image, like ``(0, 0, 100, 100)`` and
    a list of four pixel coordinates in the source image that match the destination rectangle.
    The four points form a quadrilateral (i.e. not a rectangle).
    PIL's image transform uses affine transformation to fill each rectangle in the destination
    image with data from the source quadrilateral.

    The number of QUADs is calculated dynamically to keep the deviation in the image
    transformation below one pixel. Image transformations for large map scales can be transformed with
    1-4 QUADs most of the time. For low scales, transform_meshes can generate a few hundred QUADs.

    It generates a maximum of one QUAD per 50 pixel.
    """
    src_bbox = src_srs.align_bbox(src_bbox)
    dst_bbox = dst_srs.align_bbox(dst_bbox)
    src_rect = (0, 0, src_size[0], src_size[1])
    dst_rect = (0, 0, dst_size[0], dst_size[1])
    # linear mappings between pixel coordinates and world coordinates
    to_src_px = make_lin_transf(src_bbox, src_rect)
    to_src_w = make_lin_transf(src_rect, src_bbox)
    to_dst_w = make_lin_transf(dst_rect, dst_bbox)
    meshes = []

    # more recent versions of Pillow use center coordinates for
    # transformations, we manually need to add half a pixel otherwise
    if transform_uses_center():
        px_offset = 0.0
    else:
        px_offset = 0.5

    def dst_quad_to_src(quad):
        # compute the four source-pixel corner coordinates for one
        # destination rectangle by round-tripping through world coordinates
        src_quad = []
        for dst_px in [(quad[0], quad[1]), (quad[0], quad[3]),
                       (quad[2], quad[3]), (quad[2], quad[1])]:
            dst_w = to_dst_w(
                (dst_px[0] + px_offset, dst_px[1] + px_offset))
            src_w = dst_srs.transform_to(src_srs, dst_w)
            src_px = to_src_px(src_w)
            src_quad.extend(src_px)

        return quad, src_quad

    # maximum allowed error in world units (max_px_err pixels at dst resolution)
    res = (dst_bbox[2] - dst_bbox[0]) / dst_size[0]
    max_err = max_px_err * res

    def is_good(quad, src_quad):
        # a quad is good enough when the affine approximation at its center
        # deviates less than max_err from the true SRS transformation
        w = quad[2] - quad[0]
        h = quad[3] - quad[1]

        if w < 50 or h < 50:
            # don't subdivide below 50x50 px, regardless of error
            return True

        xc = quad[0] + w / 2.0 - 0.5
        yc = quad[1] + h / 2.0 - 0.5

        # coordinate for the center of the quad
        dst_w = to_dst_w((xc, yc))
        # actual coordinate for the center of the quad
        src_px = center_quad_transform(quad, src_quad)
        real_dst_w = src_srs.transform_to(dst_srs, to_src_w(src_px))

        err = max(abs(dst_w[0] - real_dst_w[0]), abs(dst_w[1] - real_dst_w[1]))
        return err < max_err

    # recursively add meshes. divide each quad into four sub quad till
    # accuracy is good enough.
    def add_meshes(quads):
        for quad in quads:
            quad, src_quad = dst_quad_to_src(quad)
            if is_good(quad, src_quad):
                meshes.append((quad, src_quad))
            else:
                add_meshes(divide_quad(quad))

    add_meshes([(0, 0, dst_size[0], dst_size[1])])
    return meshes
def center_quad_transform(quad, src_quad):
    """
    Transform the center pixel coordinates of ``quad`` into ``src_quad``
    using the same affine/bilinear expansion that PIL.Image.transform
    applies for QUAD transforms.
    """
    width = quad[2] - quad[0]
    height = quad[3] - quad[1]

    # corner coordinates of the source quadrilateral (nw, sw, se, ne order)
    nw_x, nw_y = src_quad[0:2]
    sw_x, sw_y = src_quad[2:4]
    se_x, se_y = src_quad[4:6]
    ne_x, ne_y = src_quad[6:8]

    inv_w = 1.0 / width
    inv_h = 1.0 / height

    # bilinear coefficients, expanded exactly like PIL's quad transform
    cx = (
        nw_x,
        (ne_x - nw_x) * inv_w,
        (sw_x - nw_x) * inv_h,
        (se_x - sw_x - ne_x + nw_x) * inv_w * inv_h,
    )
    cy = (
        nw_y,
        (ne_y - nw_y) * inv_w,
        (sw_y - nw_y) * inv_h,
        (se_y - sw_y - ne_y + nw_y) * inv_w * inv_h,
    )

    # center pixel of the destination rectangle
    px = width / 2.0 - 0.5
    py = height / 2.0 - 0.5

    return (
        cx[0] + cx[1] * px + cx[2] * py + cx[3] * px * py,
        cy[0] + cy[1] * px + cy[2] * py + cy[3] * px * py,
    )
def img_for_resampling(img, resampling):
    """
    Convert paletted (mode ``P``) images to RGB(A) for any resampling
    method other than NEAREST.
    """
    pil_filter = image_filter[resampling]
    if img.mode != 'P' or pil_filter == Image.NEAREST:
        return img
    img.load()  # load to get actual palette mode
    if img.palette is None:
        # palette can still be None for cropped images
        return img.convert('RGBA')
    return img.convert(img.palette.mode)
def divide_quad(quad):
    """
    Divide ``quad`` into up to four sub quads. Only divides horizontally if
    the quad is more than twice as wide as high, and vertically vice versa.

    PIL.Image.transform expects that the lower-right corner
    of a quad overlaps by one pixel.

    >>> divide_quad((0, 0, 500, 500))
    [(0, 0, 250, 250), (250, 0, 500, 250), (0, 250, 250, 500), (250, 250, 500, 500)]
    >>> divide_quad((0, 0, 2000, 500))
    [(0, 0, 1000, 500), (1000, 0, 2000, 500)]
    >>> divide_quad((100, 200, 200, 500))
    [(100, 200, 200, 350), (100, 350, 200, 500)]
    """
    left, top, right, bottom = quad
    width = right - left
    height = bottom - top
    mid_x = int(left + width / 2)
    mid_y = int(top + height / 2)

    if width > 2 * height:
        # wide quad: split into left/right halves only
        return [
            (left, top, mid_x, bottom),
            (mid_x, top, right, bottom),
        ]
    if height > 2 * width:
        # tall quad: split into top/bottom halves only
        return [
            (left, top, right, mid_y),
            (left, mid_y, right, bottom),
        ]
    # roughly square: split into four quadrants
    return [
        (left, top, mid_x, mid_y),
        (mid_x, top, right, mid_y),
        (left, mid_y, mid_x, bottom),
        (mid_x, mid_y, right, bottom),
    ]
| {
"content_hash": "0e2279dca958ee1db7d3172cfa853d6c",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 102,
"avg_line_length": 37.20987654320987,
"alnum_prop": 0.5512607830126078,
"repo_name": "olt/mapproxy",
"id": "a96042da61672b526363776815127d13d478c768",
"size": "12700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapproxy/image/transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "HTML",
"bytes": "18782"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "1744780"
}
],
"symlink_target": ""
} |
import roax.context as context
import roax.schema as s
import unittest
from roax.resource import Forbidden, Resource, operation
from roax.security import ContextSecurityRequirement, SecurityRequirement, nested
class Never(SecurityRequirement):
    """Security requirement that can never be satisfied; always raises Forbidden."""
    def authorized(self):
        raise Forbidden
# requirement satisfied when the context stack contains an entry with req1=True
req1 = ContextSecurityRequirement(req1=True)

# requirement that always denies access
never = Never()
class R1(Resource):
    """Test resource whose operations exercise the security requirements."""

    @operation(type="action", params={}, returns=s.str(), security=[req1])
    def foo(self):
        # protected by req1 only
        return "foo_success"

    # NOTE(review): combined semantics of multiple requirements in the
    # security list (any-of vs. all-of) are defined by roax — confirm there.
    @operation(type="action", params={}, returns=s.str(), security=[req1, never])
    def bar(self):
        return "bar_success"

    @operation(type="action", params={}, returns=s.str(), security=[nested])
    def nestee(self):
        # protected by the module's "nested" requirement (imported from roax.security)
        return "nest_success"

    @operation(type="action", params={}, returns=s.str())
    def nester(self):
        # unprotected operation that calls the nested-protected one internally
        return self.nestee()
class TestSecurity(unittest.TestCase):
    """Tests for roax security requirement enforcement on operations."""

    def test_security_req_success(self):
        # operation succeeds when the required context entry is pushed
        r1 = R1()
        with context.push(req1=True):
            self.assertEqual(r1.foo(), "foo_success")

    def test_security_req_unauth(self):
        # without the context entry the operation raises Forbidden
        r1 = R1()
        with self.assertRaises(Forbidden):
            r1.foo()

    def test_security_req_multiple_unnested(self):
        # calling the nested-protected operation directly is always forbidden
        r1 = R1()
        for n in range(0, 3):
            with self.assertRaises(Forbidden):
                r1.nestee()

    def test_security_req_nested(self):
        # the nested requirement passes when called from within another operation
        r1 = R1()
        self.assertEqual(r1.nester(), "nest_success")
# run the test suite when executed directly
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "2855425766311d99a6331cc7d8087ba3",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 81,
"avg_line_length": 25.966101694915253,
"alnum_prop": 0.6305483028720626,
"repo_name": "pbryan/roax",
"id": "9275c99b667903ab633d08095165d6f776517c5a",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_security.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182458"
}
],
"symlink_target": ""
} |
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class EmailClient(object):
    """
    Email client that holds SMTP connection information and sends
    multipart (HTML plus optional plain-text) messages.
    """

    def __init__(self, host, port, username, password, use_ssl=False):
        """
        :param host: SMTP server hostname
        :param port: SMTP server port
        :param username: SMTP login user
        :param password: SMTP login password
        :param use_ssl: when True, connect with SMTP over SSL
        """
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.use_ssl = use_ssl

    def _create_connection(self):
        """Open, greet and authenticate an SMTP(_SSL) connection; return it."""
        if self.use_ssl:
            server = smtplib.SMTP_SSL(self.host, self.port)
        else:
            server = smtplib.SMTP(self.host, self.port)
        server.ehlo()
        server.login(self.username, self.password)
        return server

    @staticmethod
    def _build_message(from_address, to_address, subject, html_message, plain_message=None):
        """
        Build the MIMEMultipart message with the given headers, the HTML body
        and (if provided) a plain-text alternative.
        """
        message = MIMEMultipart()
        message['Subject'] = subject
        message['From'] = from_address
        message['To'] = to_address
        if plain_message:
            message.attach(MIMEText(plain_message, 'plain'))
        message.attach(MIMEText(html_message, 'html'))
        return message

    def send(self, from_address, to_address, subject, html_message, plain_message=None):
        """
        Send a multipart email via a fresh SMTP connection.

        :param from_address: envelope/header sender address
        :param to_address: envelope/header recipient address
        :param subject: message subject
        :param html_message: HTML body
        :param plain_message: optional plain-text alternative body
        """
        server = self._create_connection()
        message = self._build_message(from_address, to_address, subject,
                                      html_message, plain_message)
        server.sendmail(from_address, to_address, message.as_string())
        server.close()

    def email_jobs(self, jobs, from_address, to_address, use_ssl):
        """
        Send a digest email with one link per job posting.

        :param jobs: iterable of dicts with 'url' and 'jobtitle' keys
        :param from_address: sender address
        :param to_address: recipient address
        :param use_ssl: unused; kept for backward compatibility. The original
            implementation passed ``use_ssl.lower() == 'true'`` as the
            ``plain_message`` positional argument of :meth:`send`, which would
            have attached ``MIMEText(True, 'plain')`` and failed — the flag is
            connection-level state (``self.use_ssl``), not message content.
        """
        plural = 's' if len(jobs) > 1 else ''
        html_message = '<html>{}</html>'.format(
            '<br \>'.join(['<a href="{}">{}</a>'.format(job['url'], job['jobtitle']) for job in jobs]))
        self.send(
            from_address,
            to_address,
            'Top of the Pile: Found {} Job{}'.format(len(jobs), plural),
            html_message)
| {
"content_hash": "23d50e1ba71c3bf51773c991ba526207",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 103,
"avg_line_length": 33.431372549019606,
"alnum_prop": 0.5882697947214076,
"repo_name": "Brok-Bucholtz/TopOfThePile",
"id": "d5ea258cc33fc4e5515abc17b7da93b582c30973",
"size": "1705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topofthepile/email_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12241"
}
],
"symlink_target": ""
} |
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Parameters
----------
platform : string
'omni'
name : string
'hro'
tag : string
Select time between samples, one of {'1min', '5min'}
Note
----
Files are stored by the first day of each month. When downloading use
omni.download(start, stop, freq='MS') to only download days that could possibly
have data. 'MS' gives a monthly start frequency.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine. Though the CDAWEB description indicates that
these level-2 products are expected to be ok.
- Module not written by OMNI team.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import functools
import pandas as pds
import numpy as np
import pysat
# pysat instrument identification: platform/name select this module
platform = 'omni'
name = 'hro'
# supported data cadences
tags = {'1min':'1-minute time averaged data',
        '5min':'5-minute time averaged data'}
# single (unnamed) satellite supporting both tags
sat_ids = {'':['1min', '5min']}
# dates used by pysat's automated download/load tests
test_dates = {'':{'1min':pysat.datetime(2009,1,1),
                  '5min':pysat.datetime(2009,1,1)}}
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Return a Pandas Series of every file for chosen satellite data

    Parameters
    -----------
    tag : (string or NoneType)
        Denotes type of file to load.  Accepted types are '1min' and '5min'.
        (default=None)
    sat_id : (string or NoneType)
        Specifies the satellite ID for a constellation.  Not used.
        (default=None)
    data_path : (string or NoneType)
        Path to data directory.  If None is specified, the value previously
        set in Instrument.files.data_path is used.  (default=None)
    format_str : (string or NoneType)
        User specified file format.  If None is specified, the default
        formats associated with the supplied tags are used. (default=None)

    Returns
    --------
    pysat.Files.from_os : (pysat._files.Files)
        A class containing the verified available files
    """
    if format_str is None and data_path is not None:
        if (tag == '1min') | (tag == '5min'):
            min_fmt = ''.join(['omni_hro_', tag,
                               '{year:4d}{month:02d}{day:02d}_v01.cdf'])
            files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
            # files are by month, just add date to monthly filename for
            # each day of the month. load routine will use date to select out
            # appropriate data
            if not files.empty:
                # pad one entry at the end of the month so asfreq fills the
                # whole final month (.loc replaces the removed pandas .ix)
                files.loc[files.index[-1] + pds.DateOffset(months=1) -
                          pds.DateOffset(days=1)] = files.iloc[-1]
                files = files.asfreq('D', 'pad')
                # add the date to the filename so load() can pick out the day
                files = files + '_' + files.index.strftime('%Y-%m-%d')
            return files
        else:
            raise ValueError('Unknown tag')
    elif format_str is None:
        # fixed copy-paste error: message previously referred to VEFI
        estr = 'A directory must be passed to the loading routine for OMNI'
        raise ValueError(estr)
    else:
        return pysat.Files.from_os(data_path=data_path, format_str=format_str)
def load(fnames, tag=None, sat_id=None):
    """Load OMNI data for the single day encoded in the filename.

    Filenames produced by list_files have '_YYYY-mm-dd' appended to the
    monthly CDF filename; the date suffix selects the day to return.

    Returns (data, meta) as produced by pysatCDF, restricted to the
    requested day, or (empty DataFrame, None) when no files are given.
    """
    import pysatCDF

    if len(fnames) <= 0:
        return pysat.DataFrame(None), None
    else:
        # strip the appended '_YYYY-mm-dd' to recover the real CDF filename
        fname = fnames[0][0:-11]
        # pull out date appended to filename
        date = pysat.datetime.strptime(fnames[0][-10:], '%Y-%m-%d')
        with pysatCDF.CDF(fname) as cdf:
            data, meta = cdf.to_pysat()
            # pick out data for date; .loc replaces the removed pandas .ix
            data = data.loc[date:date+pds.DateOffset(days=1) - pds.DateOffset(microseconds=1)]
            return data, meta
def clean(omni):
    """Replace OMNI fill values with NaN in every data column (except Epoch).

    :param omni: pysat.Instrument-like object; requires ``.data`` (DataFrame),
        ``.meta`` (per-column metadata exposing a ``fillval`` attribute) and
        column access via ``omni[key]``. Modified in place.
    """
    for key in omni.data.columns:
        if key != 'Epoch':
            # positional row indices where the column equals its fill value
            idx, = np.where(omni[key] == omni.meta[key].fillval)
            # positional rows / labelled column assignment; replaces the
            # removed pandas .ix mixed indexer
            omni.data.iloc[idx, omni.data.columns.get_loc(key)] = np.nan
def time_shift_to_magnetic_poles(inst):
    """
    OMNI data is time-shifted to bow shock. Time shifted again
    to intersections with magnetic pole.

    Time shift calculated using distance to bow shock nose (BSN)
    and velocity of solar wind along x-direction.

    :param inst: pysat Instrument with OMNI data loaded; modified in place.
    """
    # need to fill in Vx to get an estimate of what is going on
    inst['Vx'] = inst['Vx'].interpolate('nearest')
    inst['Vx'] = inst['Vx'].fillna(method='backfill')
    inst['Vx'] = inst['Vx'].fillna(method='pad')

    # same gap filling for the bow-shock-nose x position
    inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
    inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
    inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')

    # make sure there are no gaps larger than a minute
    inst.data = inst.data.resample('1T').interpolate('time')

    # propagation time in seconds: BSN distance (presumably Earth radii,
    # scaled to km by 6371.2 — TODO confirm units) over the (negative,
    # anti-sunward) solar wind x velocity
    time_x = inst['BSN_x']*6371.2/-inst['Vx']
    idx, = np.where(np.isnan(time_x))
    if len(idx) > 0:
        # debug output for NaNs that survived the gap filling above
        print (time_x[idx])
        print (time_x)
    # shift every timestamp by its individual propagation delay
    time_x_offset = [pds.DateOffset(seconds = time) for time in time_x.astype(int)]
    new_index=[]
    for i, time in enumerate(time_x_offset):
        new_index.append(inst.data.index[i] + time)
    inst.data.index = new_index
    inst.data = inst.data.sort_index()

    return
def download(date_array, tag, sat_id, data_path=None, user=None, password=None):
    """
    download OMNI data, layout consistent with pysat

    Fetches monthly CDF files from the NASA CDAWeb anonymous FTP server
    into data_path. `user`/`password` are unused (anonymous login).
    """
    import os
    import ftplib

    ftp = ftplib.FTP('cdaweb.gsfc.nasa.gov')   # connect to host, default port
    ftp.login()               # user anonymous, passwd anonymous@

    if (tag == '1min') | (tag == '5min'):
        ftp.cwd('/pub/data/omni/omni_cdaweb/hro_'+tag)

        for date in date_array:
            # remote path includes the year directory
            fname = '{year1:4d}/omni_hro_'+tag+'_{year2:4d}{month:02d}{day:02d}_v01.cdf'
            fname = fname.format(year1=date.year, year2=date.year, month=date.month, day=date.day)
            local_fname = ''.join(['omni_hro_',tag,'_{year:4d}{month:02d}{day:02d}_v01.cdf']).format(
                    year=date.year, month=date.month, day=date.day)
            saved_fname = os.path.join(data_path,local_fname)
            try:
                print('Downloading file for '+date.strftime('%D'))
                sys.stdout.flush()
                ftp.retrbinary('RETR '+fname, open(saved_fname,'wb').write)
            except ftplib.error_perm as exception:
                # FTP 550 means "file unavailable" — treat as a missing day,
                # re-raise anything else
                # if exception[0][0:3] != '550':
                if str(exception.args[0]).split(" ", 1)[0] != '550':
                    raise
                else:
                    # remove the empty file created by open(..., 'wb')
                    os.remove(saved_fname)
                    print('File not available for '+ date.strftime('%D'))
    ftp.close()
    # ftp.quit()
    return
| {
"content_hash": "a55b23a476197fd126195c23af80664c",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 101,
"avg_line_length": 36.16326530612245,
"alnum_prop": 0.6079288939051919,
"repo_name": "aburrell/pysat",
"id": "f90197ab677b4204d9fe84f578b69ff91bf83bb8",
"size": "7112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysat/instruments/omni_hro.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "385720"
}
],
"symlink_target": ""
} |
import unittest
import warnings
import sys
from test import test_support
class TestSpecifics(unittest.TestCase):
    """Tests for specific compile()/eval()/exec behaviors (Python 2 syntax)."""

    def test_debug_assignment(self):
        # catch assignments to __debug__
        self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
        import __builtin__
        prev = __builtin__.__debug__
        setattr(__builtin__, '__debug__', 'sure')
        setattr(__builtin__, '__debug__', prev)

    def test_argument_handling(self):
        # detect duplicate positional and keyword arguments
        self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
        self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
        self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
        try:
            exec 'def f(a, a): pass'
            self.fail("duplicate arguments")
        except SyntaxError:
            pass
        try:
            exec 'def f(a = 0, a = 1): pass'
            self.fail("duplicate keyword arguments")
        except SyntaxError:
            pass
        try:
            exec 'def f(a): global a; a = 1'
            self.fail("variable is global and local")
        except SyntaxError:
            pass

    def test_syntax_error(self):
        # malformed expression must raise SyntaxError at compile time
        self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")

    def test_duplicate_global_local(self):
        # a name may not be both a parameter and declared global
        try:
            exec 'def f(a): global a; a = 1'
            self.fail("variable is global and local")
        except SyntaxError:
            pass

    def test_exec_with_general_mapping_for_locals(self):
        # exec/eval must accept any mapping (not just dict) as locals

        class M:
            "Test mapping interface versus possible calls from eval()."
            def __getitem__(self, key):
                if key == 'a':
                    return 12
                raise KeyError
            def __setitem__(self, key, value):
                self.results = (key, value)
            def keys(self):
                return list('xyz')

        m = M()
        g = globals()
        exec 'z = a' in g, m
        self.assertEqual(m.results, ('z', 12))
        try:
            exec 'z = b' in g, m
        except NameError:
            pass
        else:
            self.fail('Did not detect a KeyError')
        exec 'z = dir()' in g, m
        self.assertEqual(m.results, ('z', list('xyz')))
        exec 'z = globals()' in g, m
        self.assertEqual(m.results, ('z', g))
        exec 'z = locals()' in g, m
        self.assertEqual(m.results, ('z', m))
        try:
            exec 'z = b' in m
        except TypeError:
            pass
        else:
            self.fail('Did not validate globals as a real dict')

        class A:
            "Non-mapping"
            pass
        m = A()
        try:
            exec 'z = a' in g, m
        except TypeError:
            pass
        else:
            self.fail('Did not validate locals as a mapping')

        # Verify that dict subclasses work as well
        class D(dict):
            def __getitem__(self, key):
                if key == 'a':
                    return 12
                return dict.__getitem__(self, key)
        d = D()
        exec 'z = a' in g, d
        self.assertEqual(d['z'], 12)

    def test_complex_args(self):
        # tuple-unpacking parameters (Python 2 only syntax)

        def comp_args((a, b)):
            return a,b
        self.assertEqual(comp_args((1, 2)), (1, 2))

        def comp_args((a, b)=(3, 4)):
            return a, b
        self.assertEqual(comp_args((1, 2)), (1, 2))
        self.assertEqual(comp_args(), (3, 4))

        def comp_args(a, (b, c)):
            return a, b, c
        self.assertEqual(comp_args(1, (2, 3)), (1, 2, 3))

        def comp_args(a=2, (b, c)=(3, 4)):
            return a, b, c
        self.assertEqual(comp_args(1, (2, 3)), (1, 2, 3))
        self.assertEqual(comp_args(), (2, 3, 4))

    def test_argument_order(self):
        # non-default parameters may not follow default ones
        try:
            exec 'def f(a=1, (b, c)): pass'
            self.fail("non-default args after default")
        except SyntaxError:
            pass

    def test_float_literals(self):
        # testing bad float literals
        self.assertRaises(SyntaxError, eval, "2e")
        self.assertRaises(SyntaxError, eval, "2.0e+")
        self.assertRaises(SyntaxError, eval, "1e-")
        self.assertRaises(SyntaxError, eval, "3-4e/21")

    def test_indentation(self):
        # testing compile() of indented block w/o trailing newline"
        s = """
if 1:
    if 2:
        pass"""
        compile(s, "<string>", "exec")

    def test_literals_with_leading_zeroes(self):
        # leading zeroes: invalid octal digits are errors, but octal ints
        # and zero-padded float/complex literals are accepted (Python 2 rules)
        for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
                    "080000000000000", "000000000000009", "000000000000008"]:
            self.assertRaises(SyntaxError, eval, arg)
        self.assertEqual(eval("0777"), 511)
        self.assertEqual(eval("0777L"), 511)
        self.assertEqual(eval("000777"), 511)
        self.assertEqual(eval("0xff"), 255)
        self.assertEqual(eval("0xffL"), 255)
        self.assertEqual(eval("0XfF"), 255)
        self.assertEqual(eval("0777."), 777)
        self.assertEqual(eval("0777.0"), 777)
        self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
        self.assertEqual(eval("0777e1"), 7770)
        self.assertEqual(eval("0e0"), 0)
        self.assertEqual(eval("0000E-012"), 0)
        self.assertEqual(eval("09.5"), 9.5)
        self.assertEqual(eval("0777j"), 777j)
        self.assertEqual(eval("00j"), 0j)
        self.assertEqual(eval("00.0"), 0)
        self.assertEqual(eval("0e3"), 0)
        self.assertEqual(eval("090000000000000."), 90000000000000.)
        self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
        self.assertEqual(eval("090000000000000e0"), 90000000000000.)
        self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
        self.assertEqual(eval("090000000000000j"), 90000000000000j)
        self.assertEqual(eval("000000000000007"), 7)
        self.assertEqual(eval("000000000000008."), 8.)
        self.assertEqual(eval("000000000000009."), 9.)

    def test_unary_minus(self):
        # Verify treatment of unary minus on negative numbers SF bug #660455
        if sys.maxint == 2147483647:
            # 32-bit machine
            all_one_bits = '0xffffffff'
            self.assertEqual(eval(all_one_bits), 4294967295L)
            self.assertEqual(eval("-" + all_one_bits), -4294967295L)
        elif sys.maxint == 9223372036854775807:
            # 64-bit machine
            all_one_bits = '0xffffffffffffffff'
            self.assertEqual(eval(all_one_bits), 18446744073709551615L)
            self.assertEqual(eval("-" + all_one_bits), -18446744073709551615L)
        else:
            self.fail("How many bits *does* this machine have???")

    def test_sequence_unpacking_error(self):
        # Verify sequence packing/unpacking with "or".  SF bug #757818
        i,j = (1, -1) or (-1, 1)
        self.assertEqual(i, 1)
        self.assertEqual(j, -1)

    def test_none_assignment(self):
        # assigning to None is a SyntaxError in every statement form
        stmts = [
            'None = 0',
            'None += 0',
            '__builtins__.None = 0',
            'def None(): pass',
            'class None: pass',
            '(a, None) = 0, 0',
            'for None in range(10): pass',
            'def f(None): pass',
        ]
        for stmt in stmts:
            stmt += "\n"
            self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
            self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')

    def test_import(self):
        # valid and invalid forms of import statements
        succeed = [
            'import sys',
            'import os, sys',
            'from __future__ import nested_scopes, generators',
            'from __future__ import (nested_scopes,\ngenerators)',
            'from __future__ import (nested_scopes,\ngenerators,)',
            'from sys import stdin, stderr, stdout',
            'from sys import (stdin, stderr,\nstdout)',
            'from sys import (stdin, stderr,\nstdout,)',
            'from sys import (stdin\n, stderr, stdout)',
            'from sys import (stdin\n, stderr, stdout,)',
            'from sys import stdin as si, stdout as so, stderr as se',
            'from sys import (stdin as si, stdout as so, stderr as se)',
            'from sys import (stdin as si, stdout as so, stderr as se,)',
            ]
        fail = [
            'import (os, sys)',
            'import (os), (sys)',
            'import ((os), (sys))',
            'import (sys',
            'import sys)',
            'import (os,)',
            'from (sys) import stdin',
            'from __future__ import (nested_scopes',
            'from __future__ import nested_scopes)',
            'from __future__ import nested_scopes,\ngenerators',
            'from sys import (stdin',
            'from sys import stdin)',
            'from sys import stdin, stdout,\nstderr',
            'from sys import stdin si',
            'from sys import stdin,'
            'from sys import (*)',
            'from sys import (stdin,, stdout, stderr)',
            'from sys import (stdin, stdout),',
            ]
        for stmt in succeed:
            compile(stmt, 'tmp', 'exec')
        for stmt in fail:
            self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')

    def test_for_distinct_code_objects(self):
        # SF bug 1048870
        def f():
            f1 = lambda x=1: x
            f2 = lambda x=2: x
            return f1, f2
        f1, f2 = f()
        self.assertNotEqual(id(f1.func_code), id(f2.func_code))
def test_main():
    # standard Python 2 regression-test entry point
    test_support.run_unittest(TestSpecifics)

if __name__ == "__main__":
    test_main()
| {
"content_hash": "503ec5e7f1ce49b8af6d9c01a245db6b",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 95,
"avg_line_length": 35.742537313432834,
"alnum_prop": 0.5327278421547135,
"repo_name": "MalloyPower/parsing-python",
"id": "c567fa432abb6efafc3c6463a94c003819296063",
"size": "9579",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/test/test_compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import csv
import numpy as np
import stream
class PLDStream(stream.VectorStream):
    """
    Iterator over the WebDataCommons pay-level-domain (PLD) link graph.

    Reads a tab-separated edge list sorted by source id and yields one
    binary indicator vector (p x 1 numpy array) per source "user",
    with a 1 at each linked target index.

    NOTE: Python 2 code (uses ``reader.next()`` and the ``print``
    statement).
    """
    def __init__(self, file='/var/datasets/wdc/pld-arc'):
        self.f=open(file,'r')
        # p/n: vector dimension and expected number of vectors
        # (fixed for the pld-arc dataset)
        stream.VectorStream.__init__(self,p=65133, n=69878)
        self.reader=csv.reader(self.f, delimiter='\t')
        # _lastRow buffers the first row of the NEXT user between calls
        self._lastRow=None
        self._userId=None
        self._userVector=np.zeros((self.p,1))

    def __del__(self):
        # best-effort cleanup of the reader and underlying file handle
        del self.reader
        self.f.close()

    def __iter__(self):
        return self

    def __next__(self):
        # advance to the next user's vector; self.t counts emitted vectors
        nu=self.__nextUser()
        self.t+=1
        return nu

    def next(self):
        # Python 2 iterator protocol delegates to __next__
        return self.__next__()

    def __nextUser(self):
        """
        Accumulate all rows of the current user into an indicator vector.

        Returns the finished vector when a row for a different user (or
        EOF) is encountered; raises StopIteration when the stream is
        exhausted.
        """
        while True:
            ## Get new row or use previously retrieved
            if self._lastRow:
                ## Previously retrieved row means it's the first
                ## row of a new user
                row=self._lastRow
                self._userVector=np.zeros((self.p,1))
                self._userId=row[0]
            else:
                try:
                    row=self.reader.next()
                except StopIteration:
                    row=None
                self._lastRow=row

            if row==None:
                if self._userId==None:
                    raise StopIteration
                else:
                    ##No more data. Just return collected user vector
                    self._userId=None
                    return self._userVector
            else: ## row!=None
                if self._userId and row[0]!=self._userId:
                    ## New user: Return.
                    ## At the next call, we'll proceed with the next user
                    self._userId=None
                    return self._userVector
                self._userId=row[0]

            ##Proceed
            ## Get the basic values from the row
            ## NOTE(review): target ids appear to be 1-based in the file
            ## (hence the -1) — confirm against the dataset description
            try:
                self._userVector[int(row[1])-1]=1
            except IndexError:
                print row
                raise

            ## Delete row, now that we've used it
            self._lastRow=None
| {
"content_hash": "db72f4dbe49b1b7542aad5a7820888f4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 28.972972972972972,
"alnum_prop": 0.4631529850746269,
"repo_name": "mitliagkas/pyliakmon",
"id": "fd0e1af53938ae59202d0229b2bb1e090035b3ff",
"size": "2144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebDataCommons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61272"
}
],
"symlink_target": ""
} |
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from models import CertificateIndex, Certificate
from binascii import crc32
def get_user(request):
    """Lazily resolve and memoize the authenticated user on the request."""
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = auth.get_user(request)
        return request._cached_user
class SSLAuthenticationMiddleware(object):
    """Authenticate requests via client-certificate headers that the
    SSL-terminating frontend web server injects into the request.

    Certificates are located through ``CertificateIndex`` rows keyed by
    unsigned CRC32 checksums of the PEM, serial and DN headers; collisions
    are resolved by an exact match against the user's certificates.
    Unknown certificates are recorded so they can be linked to a user later.
    """
    header = 'SSL_VERIFIED'
    header_pem = 'SSL_CLIENT_CERT'
    header_serial = 'SSL_CLIENT_SERIAL'
    header_dn = 'SSL_DN'

    def process_request(self, request):
        # dict.has_key was removed in Python 3; 'in' works everywhere.
        if self.header not in request.META:
            raise ImproperlyConfigured('Requires SSL on frontend web server'
                                       ' to be enabled and properly configured'
                                       ' to pass SSL_* headers.')
        if request.META.get(self.header, None) != 'SUCCESS':
            # Frontend did not verify a client certificate; nothing to do.
            return
        pem = request.META.get(self.header_pem, '')
        serial = request.META.get(self.header_serial, '')
        dn = request.META.get(self.header_dn, '')
        # Mask crc32 to its unsigned value: on Python 2 (and for pre-3.0
        # semantics) crc32 may return a negative int.
        indexes = CertificateIndex.objects.filter(
            pem_index=crc32(pem) & 0xffffffff,
            serial_index=crc32(serial) & 0xffffffff,
            dn_index=crc32(dn) & 0xffffffff,
        )
        users = [index.certificate.user for index in indexes]
        if len(users) > 1:
            # CRC collisions are possible: keep only users holding an
            # exactly-matching certificate.
            users = [u for u in users
                     if any(u.certificates.filter(pem=pem, serial=serial, dn=dn))]
        if not any(users):
            # Unknown certificate: record it (idempotently) and bail out.
            Certificate.objects.get_or_create(pem=pem, serial=serial, dn=dn)
            return
        if len(users) > 1:
            raise Exception('Too many users for certificate')
        user = users[0]
        if request.user.is_authenticated() and request.user == user:
            # Already logged in as this user; nothing to change.
            return
        user = auth.authenticate(user=user)
        if user:
            request.user = user
            auth.login(request, user)
| {
"content_hash": "4768665e07f78ce2df88575075da88cd",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 106,
"avg_line_length": 41.52,
"alnum_prop": 0.552504816955684,
"repo_name": "agladkov/django-sslauth",
"id": "e2e363876e4a09157c435a3c9697c6b5d075c261",
"size": "2076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sslauth/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5786"
}
],
"symlink_target": ""
} |
import time
import inspect
import pickle
import os
import pytest
from pyparsing import *
from bayesdb.bql_grammar import *
from bayesdb.engine import Engine
from bayesdb.parser import Parser
import bayesdb.functions as functions
import numpy
# Shared BayesDB entry points used by the parser/grammar tests below.
engine = Engine('local')
parser = Parser()
# Hand-built column-metadata fixture ("M_c"): index<->name maps plus, per
# column, code<->value dictionaries and the column's model type.  Columns
# 0, 2 and 3 use 'symmetric_dirichlet_discrete'; column 1 uses
# 'normal_inverse_gamma' (hence its empty code maps).
test_M_c = {'idx_to_name': {'1': 'b', '0': 'a', '3': 'd', '2': 'c'},
            'column_metadata': [
                {'code_to_value': {'a': 0, '1': 1, '2': 2, '4': 3, '6': 4},
                 'value_to_code': {0: 'a', 1: '1', 2: '2', 3: '4', 4: '6'},
                 'modeltype': 'symmetric_dirichlet_discrete'},
                {'code_to_value': {}, 'value_to_code': {},
                 'modeltype': 'normal_inverse_gamma'},
                {'code_to_value': {'we': 0, 'e': 1, 'w': 2, 'sd': 3},
                 'value_to_code': {0: 'we', 1: 'e', 2: 'w', 3: 'sd'},
                 'modeltype': 'symmetric_dirichlet_discrete'},
                {'code_to_value': {'3': 1, '2': 2, '5': 0, '4': 3},
                 'value_to_code': {0: '5', 1: '3', 2: '2', 3: '4'},
                 'modeltype': 'symmetric_dirichlet_discrete'}],
            'name_to_idx': {'a': 0, 'c': 2, 'b': 1, 'd': 3}}
# Same fixture extended with a fifth column 'key' whose modeltype is
# 'ignore' -- presumably the row-key column, excluded from modeling
# (NOTE(review): confirm against the engine's key handling).
test_M_c_full = {'idx_to_name': {'1': 'b', '0': 'a', '3': 'd', '2': 'c', '4': 'key'},
                 'column_metadata': [
                     {'code_to_value': {'a': 0, '1': 1, '2': 2, '4': 3, '6': 4},
                      'value_to_code': {0: 'a', 1: '1', 2: '2', 3: '4', 4: '6'},
                      'modeltype': 'symmetric_dirichlet_discrete'},
                     {'code_to_value': {}, 'value_to_code': {},
                      'modeltype': 'normal_inverse_gamma'},
                     {'code_to_value': {'we': 0, 'e': 1, 'w': 2, 'sd': 3},
                      'value_to_code': {0: 'we', 1: 'e', 2: 'w', 3: 'sd'},
                      'modeltype': 'symmetric_dirichlet_discrete'},
                     {'code_to_value': {'3': 1, '2': 2, '5': 0, '4': 3},
                      'value_to_code': {0: '5', 1: '3', 2: '2', 3: '4'},
                      'modeltype': 'symmetric_dirichlet_discrete'},
                     {'code_to_value': {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8},
                      'value_to_code': {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8'},
                      'modeltype': 'ignore'}],
                 'name_to_idx': {'a': 0, 'c': 2, 'b': 1, 'd': 3, 'key': 4}}
# Data fixture: one row per record, values already coded numerically
# (aligned with test_M_c's columns); numpy.nan marks missing entries.
test_T = [[1.0, 1.0, 0.0, numpy.nan],
          [2.0, 2.0, 0.0, 2.0],
          [0.0, 3.0, 0.0, 3.0],
          [3.0, 3.0, 2.0, numpy.nan],
          [3.0, 4.0, 2.0, 0.0],
          [4.0, 5.0, 1.0, numpy.nan],
          [numpy.nan, 6.0, 2.0, 1.0],
          [numpy.nan, 7.0, 3.0, 1.0],
          [numpy.nan, 7.0, 3.0, 1.0]]
# test_T plus the trailing key column matching test_M_c_full.
test_T_full = [[1.0, 1.0, 0.0, numpy.nan, 0],
               [2.0, 2.0, 0.0, 2.0, 1],
               [0.0, 3.0, 0.0, 3.0, 2],
               [3.0, 3.0, 2.0, numpy.nan, 3],
               [3.0, 4.0, 2.0, 0.0, 4],
               [4.0, 5.0, 1.0, numpy.nan, 5],
               [numpy.nan, 6.0, 2.0, 1.0, 6],
               [numpy.nan, 7.0, 3.0, 1.0, 7],
               [numpy.nan, 7.0, 3.0, 1.0, 8]]
def test_keyword_plurality_ambiguity_pyparsing():
    """Singular and plural spellings of a keyword parse to the same token.

    BUGFIX: the original asserted ``minute[0] == 'minute'`` twice and never
    checked the parsed plural ``minutes`` result; the loop below checks both
    forms for every keyword uniformly.
    """
    cases = [
        (model_keyword, 'model', 'models', 'model'),
        (iteration_keyword, 'iteration', 'iterations', 'iteration'),
        (sample_keyword, 'sample', 'samples', 'sample'),
        (column_keyword, 'column', 'columns', 'column'),
        (list_keyword, 'list', 'lists', 'list'),
        (btable_keyword, 'btable', 'btables', 'btable'),
        (minute_keyword, 'minute', 'minutes', 'minute'),
    ]
    for keyword, singular, plural, expected in cases:
        assert keyword.parseString(singular, parseAll=True)[0] == expected
        assert keyword.parseString(plural, parseAll=True)[0] == expected
def test_composite_keywords_pyparsing():
    """Multi-word keywords parse case-insensitively to canonical tokens."""
    cases = [
        (execute_file_keyword, 'eXecute file', 'execute_file'),
        (create_btable_keyword, 'cReate btable', 'create_btable'),
        (update_schema_for_keyword, 'update Schema for', 'update_schema'),
        (models_for_keyword, 'Models for', 'model for'),
        (model_index_keyword, 'model Index', 'model index'),
        (save_model_keyword, 'save modeL', 'save_models'),
        (load_model_keyword, 'load Models', 'load_models'),
        (save_to_keyword, 'save To', 'save to'),
        (list_btables_keyword, 'list bTables', 'list_btables'),
        (show_schema_for_keyword, 'show Schema for', 'show_schema'),
        (show_models_for_keyword, 'show modeLs for', 'show_models'),
        (show_diagnostics_for_keyword, 'show diaGnostics for', 'show_diagnostics'),
        (estimate_pairwise_keyword, 'estimate Pairwise', 'estimate_pairwise'),
        (with_confidence_keyword, 'with confIdence', 'with confidence'),
        (dependence_probability_keyword, 'dependence probability', 'dependence probability'),
        (mutual_information_keyword, 'mutual inFormation', 'mutual information'),
        (estimate_columns_from_keyword, 'estimate columns froM', 'estimate_columns'),
        (column_lists_keyword, 'column Lists', 'column list'),
        (with_respect_to_keyword, 'with Respect to', 'with respect to'),
        (probability_of_keyword, 'probability of', 'probability'),
        (predictive_probability_of_keyword, 'predictive Probability of', 'predictive probability'),
        (save_clusters_with_threshold_keyword, 'save clusters with threshold', 'save clusters with threshold'),
        (estimate_pairwise_row_keyword, 'estimate Pairwise row', 'estimate_pairwise_row'),
    ]
    for keyword, text, expected in cases:
        assert keyword.parseString(text, parseAll=True)[0] == expected
def test_valid_values_names_pyparsing():
    """Terminal rules (value, numbers, filenames) accept the documented forms.

    Quoted values come back with the surrounding quotes stripped (and
    escaped inner quotes unescaped); bare values parse verbatim.
    """
    # Inputs below are paired positionally with valid_values_results.
    valid_values=[
        '4',
        '42.04',
        '.4',
        '4.',
        "'\sjekja8391(*^@(%()!@#$%^&*()_+=-~'",
        "a0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+-./:<=>?@[\]^_`{|}~",
        'b0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+-./:<=>?@[\]^_`{|}~',
        '"c0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\\"#$%&\'()*+-./:<=>?@[\]^_`{|}~"',
        "'d0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+-./:<=>?@[\]^_`{|}~'",
        "'numbers 0'",
        "'k skj s'",
        ]
    valid_values_results=[
        '4',
        '42.04',
        '.4',
        '4.',
        '\sjekja8391(*^@(%()!@#$%^&*()_+=-~',
        "a0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+-./:<=>?@[\]^_`{|}~",
        'b0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+-./:<=>?@[\]^_`{|}~',
        "c0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+-./:<=>?@[\]^_`{|}~",
        "d0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+-./:<=>?@[\]^_`{|}~",
        'numbers 0',
        'k skj s',
        ]
    for i in range(len(valid_values)):
        assert value.parseString(valid_values[i],parseAll=True)[0] == valid_values_results[i]
    # Bare identifiers parse unchanged.
    valid_column_identifiers = [
        "a",
        "a1",
        "a_1",
        "a_a",
        "a_",
        "aa"
        ]
    valid_column_identifiers_results = [
        "a",
        "a1",
        "a_1",
        "a_a",
        "a_",
        "aa"
        ]
    for i in range(len(valid_column_identifiers)):
        assert value.parseString(valid_column_identifiers[i],parseAll=True)[0] == valid_column_identifiers_results[i]
    # Numeric terminals: ints and floats, with and without a decimal point.
    assert float_number.parseString('1',parseAll=True)[0] == '1'
    assert int_number.parseString('1',parseAll=True)[0] == '1'
    # Note: '1.' keeps only the integer part here (no parseAll).
    assert float_number.parseString('1.')[0] == '1'
    assert float_number.parseString('.1',parseAll=True)[0] == '.1'
    assert float_number.parseString('0.1',parseAll=True)[0] == '0.1'
    assert float_number.parseString('11',parseAll=True)[0] == '11'
    assert int_number.parseString('11',parseAll=True)[0] == '11'
    assert float_number.parseString('11.01',parseAll=True)[0] == '11.01'
    # Filenames: bare paths, punctuation-heavy paths, and quoted paths
    # containing spaces (quotes are stripped).
    assert filename.parseString("~/filename.csv",parseAll=True)[0] == "~/filename.csv"
    assert filename.parseString("!\"/#$%&'()*+-.:<=>?@[\]^_`{|}~",parseAll=True)[0] == "!\"/#$%&'()*+-.:<=>?@[\]^_`{|}~"
    assert filename.parseString("'/filename with space.csv'",parseAll=True)[0] == "/filename with space.csv"
def test_simple_functions():
    """Smoke-test the single-statement rules: LIST/SHOW/DROP/LOAD/SAVE/HELP."""
    assert list_btables_function.parseString("LIST BTABLES", parseAll=True).statement_id == 'list_btables'
    assert list_btables_function.parseString("LIST BTABLE", parseAll=True).statement_id == 'list_btables'
    # SHOW variants share one rule; each must capture id and btable.
    schema = show_for_btable_statement.parseString("SHOW SCHEMA FOR table_1", parseAll=True)
    assert schema.statement_id == 'show_schema'
    assert schema.btable == 'table_1'
    assert show_for_btable_statement.parseString("SHOW MODELS FOR table_1", parseAll=True).statement_id == 'show_models'
    assert show_for_btable_statement.parseString("SHOW MODEL FOR table_1", parseAll=True).btable == 'table_1'
    diagnostics = show_for_btable_statement.parseString("SHOW DIAGNOSTICS FOR table_1", parseAll=True)
    assert diagnostics.statement_id == 'show_diagnostics'
    assert diagnostics.btable == 'table_1'
    assert show_for_btable_statement.parseString("SHOW COLUMN LISTS FOR table_1", parseAll=True).btable == 'table_1'
    assert show_for_btable_statement.parseString("SHOW COLUMNS LIST FOR table_1", parseAll=True).statement_id == 'show_column_lists'
    assert show_columns_function.parseString("SHOW COLUMNS asdf FOR table_1", parseAll=True).column_list == 'asdf'
    drop_columns = drop_column_list_function.parseString("DROP COLUMN LIST group1 FROM table_1", parseAll=True)
    assert drop_columns.list_name == 'group1'
    assert drop_columns.statement_id == 'drop_column_list'
    assert drop_columns.btable == 'table_1'
    assert show_for_btable_statement.parseString("SHOW ROW LISTS FOR table_1", parseAll=True).statement_id == 'show_row_lists'
    assert show_for_btable_statement.parseString("SHOW ROW list FOR table_1", parseAll=True).btable == 'table_1'
    drop_rows = drop_row_list_function.parseString("DROP ROW LIST group1 FROM table_1", parseAll=True)
    assert drop_rows.list_name == 'group1'
    assert drop_rows.statement_id == 'drop_row_list'
    assert drop_rows.btable == 'table_1'
    # LOAD accepts singular/plural MODEL and quoted filenames with spaces.
    assert load_model_function.parseString("LOAD MODELS ~/filename.csv INTO table_1", parseAll=True).statement_id == 'load_models'
    assert load_model_function.parseString("LOAD MODEL ~/filename.csv INTO table_1", parseAll=True).statement_id == 'load_models'
    load = load_model_function.parseString("LOAD MODELS ~/filename.csv INTO table_1", parseAll=True)
    assert load.filename == '~/filename.csv'
    assert load.btable == 'table_1'
    assert load_model_function.parseString("LOAD MODELS '~/filena me.csv' INTO table_1", parseAll=True).filename == '~/filena me.csv'
    save = save_model_from_function.parseString("SAVE MODEL FROM table_1 to filename.pkl.gz", parseAll=True)
    assert save.btable == 'table_1'
    assert save.statement_id == 'save_models'
    assert save.filename == 'filename.pkl.gz'
    assert drop_btable_function.parseString("DROP BTABLE table_1", parseAll=True).statement_id == 'drop_btable'
    assert drop_btable_function.parseString("DROP BTABLES table_1", parseAll=True).statement_id == 'drop_btable'
    assert drop_btable_function.parseString("DROP BTABLE table_1", parseAll=True).btable == 'table_1'
    # DROP MODELS index specs normalize to sorted, de-duplicated int lists.
    single = drop_model_function.parseString("DROP MODEL 1 FROM table_1", parseAll=True)
    span = drop_model_function.parseString("DROP MODELS 1-5 FROM table_1", parseAll=True)
    mixed = drop_model_function.parseString("DROP MODELS 1,2,6-9 FROM table_1", parseAll=True)
    repeated = drop_model_function.parseString("DROP MODELS 1-5,1-5 FROM table_1", parseAll=True)
    assert single.statement_id == 'drop_models'
    assert single.btable == 'table_1'
    assert single.index_clause.asList() == [1]
    assert span.index_clause.asList() == [1, 2, 3, 4, 5]
    assert mixed.index_clause.asList() == [1, 2, 6, 7, 8, 9]
    assert repeated.index_clause.asList() == [1, 2, 3, 4, 5]
    assert help_function.parseString("HELp", parseAll=True).statement_id == 'help'
def test_update_schema_pyparsing():
    """UPDATE SCHEMA FOR ... SET parses column/type pairs and type parameters."""
    multi = update_schema_for_function.parseString(
        "UPDATE SCHEMA FOR test_btable SET col_1 = Categorical,col_2=numerical , col_3 = ignore",
        parseAll=True)
    assert multi.statement_id == 'update_schema'
    assert multi.btable == 'test_btable'
    # Types are lower-cased and flexible whitespace around '=' is accepted.
    expected = [('col_1', 'categorical'), ('col_2', 'numerical'), ('col_3', 'ignore')]
    for i, (col, typ) in enumerate(expected):
        assert multi.type_clause[i][0] == col
        assert multi.type_clause[i][1] == typ
    key_type = update_schema_for_function.parseString(
        "UPDATE SCHEMA FOR test_btable SET col_1 = key", parseAll=True)
    assert key_type.type_clause[0][0] == 'col_1'
    assert key_type.type_clause[0][1] == 'key'
    with_cardinality = update_schema_for_function.parseString(
        "UPDATE SCHEMA FOR test_btable SET col_1 = categorical(15)", parseAll=True)
    assert with_cardinality.type_clause[0][0] == 'col_1'
    assert with_cardinality.type_clause[0][1] == 'categorical'
    assert with_cardinality.type_clause[0].parameters.cardinality == '15'
    cyclic_range = update_schema_for_function.parseString(
        "UPDATE SCHEMA FOR test_btable SET col_2 = cyclic(0, 10)", parseAll=True)
    assert cyclic_range.type_clause[0][0] == 'col_2'
    assert cyclic_range.type_clause[0][1] == 'cyclic'
    assert cyclic_range.type_clause[0].parameters.min == '0'
    assert cyclic_range.type_clause[0].parameters.max == '10'
def test_update_codebook_pyparsing():
    """UPDATE CODEBOOK FOR <btable> FROM <csv> captures table and filename."""
    parsed = update_codebook_for_function.parseString(
        "UPDATE CODEBOOK FOR test_btable FROM new_codebook.csv")
    assert parsed.statement_id == 'update_codebook'
    assert parsed.btable == 'test_btable'
    assert parsed.filename == 'new_codebook.csv'
def test_describe_pyparsing():
    """DESCRIBE accepts a single column or a comma-separated column list."""
    one_col = describe_function.parseString("DESCRIBE col_1 FOR test_btable")
    two_cols = describe_function.parseString("DESCRIBE col_1, col_2 FOR test_btable")
    for parsed in (one_col, two_cols):
        assert parsed.statement_id == 'describe'
        assert parsed.btable == 'test_btable'
        assert 'col_1' in parsed.columnset.asList()
    assert 'col_2' in two_cols.columnset.asList()
def test_update_descriptions_pyparsing():
    """UPDATE DESCRIPTION(S) FOR ... SET parses quoted per-column descriptions."""
    single = update_descriptions_for_function.parseString(
        'UPDATE DESCRIPTION FOR test_btable SET col_1="Hamish the cat"')
    assert single.statement_id == "update_descriptions"
    assert single.btable == "test_btable"
    assert single.label_clause[0][0] == "col_1"
    assert single.label_clause[0][1] == "Hamish the cat"
    multiple = update_descriptions_for_function.parseString(
        'UPDATE DESCRIPTIONS FOR test_btable SET col_1="Hamish the cat", col_2="trevor"')
    assert multiple.statement_id == "update_descriptions"
    assert multiple.btable == "test_btable"
    for i, (col, description) in enumerate([("col_1", "Hamish the cat"),
                                            ("col_2", "trevor")]):
        assert multiple.label_clause[i][0] == col
        assert multiple.label_clause[i][1] == description
def test_update_short_names_pyparsing():
    """UPDATE SHORT NAME(S) FOR ... SET parses quoted per-column short names."""
    single = update_short_names_for_function.parseString(
        'UPDATE SHORT NAME FOR test_btable SET col_1="Hamish"')
    assert single.statement_id == "update_short_names"
    assert single.btable == "test_btable"
    assert single.label_clause[0][0] == "col_1"
    assert single.label_clause[0][1] == "Hamish"
    multiple = update_short_names_for_function.parseString(
        'UPDATE SHORT NAMES FOR test_btable SET col_1="Hamish", col_2="trevor"')
    assert multiple.statement_id == "update_short_names"
    assert multiple.btable == "test_btable"
    for i, (col, short_name) in enumerate([("col_1", "Hamish"), ("col_2", "trevor")]):
        assert multiple.label_clause[i][0] == col
        assert multiple.label_clause[i][1] == short_name
def test_create_btable_pyparsing():
    """CREATE BTABLE handles quoted filenames (with spaces) and bare paths."""
    quoted = create_btable_function.parseString(
        "CREATE BTABLE test.btable FROM '~/filenam e.csv'", parseAll=True)
    bare = create_btable_function.parseString(
        "CREATE BTABLE test_btable FROM ~/filename.csv", parseAll=True)
    assert quoted.statement_id == 'create_btable'
    assert quoted.btable == 'test.btable'
    assert quoted.filename == '~/filenam e.csv'
    assert bare.btable == 'test_btable'
    assert bare.filename == '~/filename.csv'
def test_execute_file_pyparsing():
    """EXECUTE FILE strips quotes from quoted paths and accepts bare paths."""
    quoted = execute_file_function.parseString("EXECUTE FILE '/filenam e.bql'", parseAll=True)
    bare = execute_file_function.parseString("EXECUTE FILE /filename.bql", parseAll=True)
    assert quoted.filename == "/filenam e.bql"
    assert bare.filename == "/filename.bql"
def test_initialize_pyparsing():
    """INITIALIZE [<n>] MODEL(S) FOR <btable>; the model count is optional."""
    cases = [
        ("INITIALIZE 3 MODELS FOR test_table", '3'),
        ("INITIALIZE 3 MODEL FOR test_table", '3'),
        ("INITIALIZE MODELS FOR test_table", ''),  # count omitted
    ]
    for query, expected_count in cases:
        parsed = initialize_function.parseString(query, parseAll=True)
        assert parsed.statement_id == 'initialize_models'
        assert parsed.num_models == expected_count
        assert parsed.btable == 'test_table'
def test_analyze_pyparsing():
    """ANALYZE parsing: iteration/minute budgets and model index lists.

    Index specifications (single ids, ranges, comma lists, duplicates)
    must normalize to a sorted list of unique ints.
    """
    analyze_1 = analyze_function.parseString("ANALYZE table_1 FOR 10 ITERATIONS",parseAll=True)
    analyze_2 = analyze_function.parseString("ANALYZE table_1 FOR 1 ITERATION",parseAll=True)
    analyze_3 = analyze_function.parseString("ANALYZE table_1 FOR 10 MINUTES",parseAll=True)
    analyze_4 = analyze_function.parseString("ANALYZE table_1 FOR 1 MINUTE",parseAll=True)
    analyze_5 = analyze_function.parseString("ANALYZE table_1 MODEL 1 FOR 10 MINUTES",parseAll=True)
    analyze_6 = analyze_function.parseString("ANALYZE table_1 MODELS 1-3 FOR 1 ITERATION",parseAll=True)
    analyze_7 = analyze_function.parseString("ANALYZE table_1 MODELS 1,2,3 FOR 10 MINUTES",parseAll=True)
    analyze_8 = analyze_function.parseString("ANALYZE table_1 MODELS 1, 3-5 FOR 1 ITERATION",parseAll=True)
    analyze_9 = analyze_function.parseString("ANALYZE table_1 MODELS 1-3, 5 FOR 10 MINUTES",parseAll=True)
    analyze_10 = analyze_function.parseString("ANALYZE table_1 MODELS 1-3, 5-7, 9, 10 FOR 1 ITERATION",parseAll=True)
    analyze_11 = analyze_function.parseString("ANALYZE table_1 MODELS 1, 1, 2, 2 FOR 10 MINUTES",parseAll=True)
    analyze_12 = analyze_function.parseString("ANALYZE table_1 MODELS 1-5, 1-5, 5 FOR 1 ITERATION",parseAll=True)
    assert analyze_1.statement_id == 'analyze'
    assert analyze_1.btable == 'table_1'
    # BUGFIX: the original also asserted analyze_1.index_lust == '' -- a typo
    # for index_clause.  pyparsing resolves unknown result names to '', so
    # the typo silently passed while testing nothing; it has been removed.
    assert analyze_1.index_clause == ''
    assert analyze_1.num_iterations == '10'
    assert analyze_1.num_minutes == ''
    assert analyze_2.num_iterations == '1'
    assert analyze_2.num_minutes == ''
    assert analyze_3.num_iterations == ''
    assert analyze_3.num_minutes == '10'
    assert analyze_4.num_iterations == ''
    assert analyze_4.num_minutes == '1'
    assert analyze_5.index_clause.asList() == [1]
    assert analyze_6.index_clause.asList() == [1,2,3]
    assert analyze_7.index_clause.asList() == [1,2,3]
    assert analyze_8.index_clause.asList() == [1,3,4,5]
    assert analyze_9.index_clause.asList() == [1,2,3,5]
    assert analyze_10.index_clause.asList() == [1,2,3,5,6,7,9,10]
    assert analyze_11.index_clause.asList() == [1,2]
    assert analyze_12.index_clause.asList() == [1,2,3,4,5]
def test_subclauses_pyparsing():
    """The SAVE TO subclause captures the target filename."""
    parsed = save_to_clause.parseString("save to filename.csv")
    assert parsed.filename == 'filename.csv'
def test_row_clause_pyparsing():
    """Rows are addressed either by row id or by <column> = <value>."""
    by_id = row_clause.parseString('1', parseAll=True)
    by_int = row_clause.parseString("column = 1", parseAll=True)
    by_quoted = row_clause.parseString("column = 'value'", parseAll=True)
    by_bare = row_clause.parseString("column = value", parseAll=True)
    # Exactly one of row_id / column is populated per form.
    assert by_id.row_id == '1'
    assert by_id.column == ''
    assert by_int.row_id == ''
    assert by_int.column == 'column'
    assert by_int.column_value == '1'
    assert by_quoted.column_value == 'value'
    assert by_bare.column_value == 'value'
def test_row_functions_pyparsing():
    """SIMILARITY TO <row> [WITH RESPECT TO <columns>] and TYPICALITY."""
    by_id = similarity_to_function.parseString("SIMILARITY TO 1", parseAll=True)
    by_int_value = similarity_to_function.parseString("SIMILARITY TO col_2 = 1", parseAll=True)
    by_quoted_value = similarity_to_function.parseString("SIMILARITY TO col_2 = 'a'", parseAll=True)
    by_bare_value = similarity_to_function.parseString("SIMILARITY TO col_2 = a", parseAll=True)
    wrt_single = similarity_to_function.parseString("SIMILARITY TO 1 WITH RESPECT TO col_1", parseAll=True)
    wrt_pair = similarity_to_function.parseString("SIMILARITY TO col_2 = 1 WITH RESPECT TO col_1,col_2", parseAll=True)
    wrt_spaced = similarity_to_function.parseString("SIMILARITY TO col_2 = 'a' WITH RESPECT TO col_1 , col_3", parseAll=True)
    wrt_bare = similarity_to_function.parseString("SIMILARITY TO col_2 = a WITH RESPECT TO col_1", parseAll=True)
    assert by_id.function.function_id == 'similarity'
    assert by_id.function.row_id == '1'
    assert by_int_value.function.column == 'col_2'
    assert by_int_value.function.column_value == '1'
    assert by_quoted_value.function.column == 'col_2'
    assert by_quoted_value.function.column_value == 'a'
    assert by_bare_value.function.column == 'col_2'
    assert by_bare_value.function.column_value == 'a'
    # with_respect_to is empty unless the clause is present.
    assert by_bare_value.function.with_respect_to == ''
    assert not wrt_single.function.with_respect_to == ''
    assert wrt_single.function.column_list.asList() == ['col_1']
    assert wrt_pair.function.column_list.asList() == ['col_1', 'col_2']
    assert wrt_spaced.function.column_list.asList() == ['col_1', 'col_3']
    assert wrt_bare.function.column_list.asList() == ['col_1']
    assert typicality_function.parseString('Typicality', parseAll=True).function.function_id == 'typicality'
def test_column_functions_pyparsing():
    """DEPENDENCE PROBABILITY / MUTUAL INFORMATION / CORRELATION [OF x] WITH y.

    All three rules share the same shape, so they are exercised uniformly.
    """
    cases = [
        (dependence_probability_function, 'DEPENDENCE PROBABILITY', 'dependence probability'),
        (mutual_information_function, 'MUTUAL INFORMATION', 'mutual information'),
        (correlation_function, 'CORRELATION', 'correlation'),
    ]
    for rule, prefix, expected_id in cases:
        implicit = rule.parseString(prefix + ' WITH column_1', parseAll=True)
        explicit = rule.parseString(prefix + ' OF column_2 WITH column_1', parseAll=True)
        assert implicit.function.function_id == expected_id
        assert explicit.function.function_id == expected_id
        assert implicit.function.with_column == 'column_1'
        assert explicit.function.with_column == 'column_1'
        assert explicit.function.of_column == 'column_2'
def test_probability_of_function_pyparsing():
    """PROBABILITY OF <col> = <value> for numeric, quoted, and bare values."""
    numeric = probability_of_function.parseString("PROBABILITY OF col_1 = 1", parseAll=True)
    quoted = probability_of_function.parseString("PROBABILITY OF col_1 = 'value'", parseAll=True)
    bare = probability_of_function.parseString("PROBABILITY OF col_1 = value", parseAll=True)
    assert numeric.function.function_id == 'probability'
    assert numeric.function.column == 'col_1'
    assert numeric.function.value == '1'
    assert quoted.function.value == 'value'
    assert bare.function.value == 'value'
def test_predictive_probability_of_pyparsing():
    """PREDICTIVE PROBABILITY OF <column> yields its function id and column."""
    parsed = predictive_probability_of_function.parseString(
        "PREDICTIVE PROBABILITY OF column_1", parseAll=True)
    assert parsed.function.function_id == 'predictive probability'
    assert parsed.function.column == 'column_1'
def test_typicality_of_pyparsing():
    """TYPICALITY OF <column> yields the typicality function for that column."""
    parsed = typicality_function.parseString("TYPICALITY OF column_1", parseAll=True)
    assert parsed.function.function_id == 'typicality'
    assert parsed.function.column == 'column_1'
def test_order_by_clause_pyparsing():
    """ORDER BY accepts columns, functions, ASC/DESC, and mixtures thereof."""
    single_col = order_by_clause.parseString("ORDER BY column_1", parseAll=True)
    multi_col = order_by_clause.parseString("ORDER BY column_1,column_2 , column_3", parseAll=True)
    assert single_col.order_by[0].function.column == 'column_1'
    assert multi_col.order_by[1].function.column == 'column_2'
    typicality_only = order_by_clause.parseString("ORDER BY TYPICALITY", parseAll=True)
    assert typicality_only.order_by[0].function.function_id == 'typicality'
    typ_then_col = order_by_clause.parseString("ORDER BY TYPICALITY, column_1", parseAll=True)
    assert typ_then_col.order_by[0].function.function_id == 'typicality'
    assert typ_then_col.order_by[1].function.column == 'column_1'
    col_then_typ = order_by_clause.parseString("ORDER BY column_1, TYPICALITY", parseAll=True)
    assert col_then_typ.order_by[0].function.column == 'column_1'
    assert col_then_typ.order_by[1].function.function_id == 'typicality'
    pred_prob = order_by_clause.parseString("ORDER BY PREDICTIVE PROBABILITY OF column_1", parseAll=True)
    assert pred_prob.order_by[0].function.function_id == 'predictive probability'
    assert pred_prob.order_by[0].function.column == 'column_1'
    pred_then_col = order_by_clause.parseString("ORDER BY PREDICTIVE PROBABILITY OF column_1, column_1", parseAll=True)
    assert pred_then_col.order_by[1].function.column == 'column_1'
    assert pred_then_col.order_by[0].function.function_id == 'predictive probability'
    assert pred_then_col.order_by[0].function.column == 'column_1'
    # A mixed clause exercising every supported sort-key form at once.
    mixture = order_by_clause.parseString(
        "ORDER BY column_1, TYPICALITY, PREDICTIVE PROBABILITY OF column_1, column_2, SIMILARITY TO 2, SIMILARITY TO column_1 = 1 WITH RESPECT TO column_4",
        parseAll=True)
    assert mixture.order_by[0].function.column == 'column_1'
    assert mixture.order_by[1].function.function_id == 'typicality'
    assert mixture.order_by[2].function.function_id == 'predictive probability'
    assert mixture.order_by[2].function.column == 'column_1'
    assert mixture.order_by[3].function.column == 'column_2'
    assert mixture.order_by[4].function.function_id == 'similarity'
    assert mixture.order_by[4].function.row_id == '2'
    assert mixture.order_by[5].function.function_id == 'similarity'
    assert mixture.order_by[5].function.column == 'column_1'
    assert mixture.order_by[5].function.column_value == '1'
    # TODO: with_respect_to exposes positional indexes; names would be clearer.
    assert mixture.order_by[5].function.with_respect_to[1][0] == 'column_4'
    asc_col = order_by_clause.parseString("ORDER BY column_1 asc", parseAll=True)
    mixed_directions = order_by_clause.parseString("ORDER BY column_1 asc,column_2 desc , column_3", parseAll=True)
    assert asc_col.order_by[0].function.column == 'column_1'
    assert mixed_directions.order_by[1].function.column == 'column_2'
    assert asc_col.order_by[0].asc_desc == 'asc'
    assert mixed_directions.order_by[1].asc_desc == 'desc'
    typicality_asc = order_by_clause.parseString("ORDER BY TYPICALITY asc", parseAll=True)
    assert typicality_asc.order_by[0].asc_desc == 'asc'
def test_whereclause_pyparsing():
    """Exercise the WHERE-clause grammar: plain column comparisons, BQL
    functions (TYPICALITY, PREDICTIVE PROBABILITY, SIMILARITY), the CONF
    confidence suffix, AND-chained conditions, and KEY IN <row list>.

    All parsed values are compared as strings because pyparsing returns
    raw token text (e.g. '1', '.5'), not converted numbers.
    """
    # WHERE <column> <operation> <value>
    whereclause_1 = "WHERE column_1 = 1"
    parsed_1 = where_clause.parseString(whereclause_1,parseAll=True)
    assert parsed_1.where_keyword == 'where'
    assert parsed_1.where_conditions[0].function.column == 'column_1'
    assert parsed_1.where_conditions[0].operation == '='
    assert parsed_1.where_conditions[0].value == '1'
    whereclause_2 = "WHERE column_1 <= 1"
    parsed_2 = where_clause.parseString(whereclause_2,parseAll=True)
    assert parsed_2.where_conditions[0].function.column == 'column_1'
    assert parsed_2.where_conditions[0].operation == '<='
    assert parsed_2.where_conditions[0].value == '1'
    whereclause_3 = "WHERE column_1 > 1.0"
    parsed_3 = where_clause.parseString(whereclause_3,parseAll=True)
    assert parsed_3.where_conditions[0].operation == '>'
    assert parsed_3.where_conditions[0].value == '1.0'
    whereclause_4 = "WHERE column_1 = a"
    parsed_4 = where_clause.parseString(whereclause_4,parseAll=True)
    assert parsed_4.where_conditions[0].operation == '='
    assert parsed_4.where_conditions[0].value == 'a'
    # Quoted string values come back with the quotes stripped.
    whereclause_5 = "WHERE column_1 = 'a'"
    parsed_5 = where_clause.parseString(whereclause_5,parseAll=True)
    assert parsed_5.where_conditions[0].value == 'a'
    whereclause_6 = "WHERE column_1 = 'two words'"
    parsed_6 = where_clause.parseString(whereclause_6,parseAll=True)
    assert parsed_6.where_conditions[0].value == 'two words'
    # Functions
    whereclause_7 = "WHERE TYPICALITY > .8"
    parsed_7 = where_clause.parseString(whereclause_7,parseAll=True)
    assert parsed_7.where_conditions[0].function.function_id == 'typicality'
    assert parsed_7.where_conditions[0].operation == '>'
    assert parsed_7.where_conditions[0].value == '.8'
    whereclause_8 = "WHERE PREDICTIVE PROBABILITY OF column_1 > .1"
    parsed_8 = where_clause.parseString(whereclause_8,parseAll=True)
    assert parsed_8.where_conditions[0].function.function_id == 'predictive probability'
    assert parsed_8.where_conditions[0].function.column == 'column_1'
    assert parsed_8.where_conditions[0].operation == '>'
    assert parsed_8.where_conditions[0].value == '.1'
    # SIMILARITY TO <row id>, optionally WITH RESPECT TO <columns>.
    whereclause_9 = "WHERE SIMILARITY TO 2 > .1"
    parsed_9 = where_clause.parseString(whereclause_9,parseAll=True)
    assert parsed_9.where_conditions[0].function.function_id == 'similarity'
    assert parsed_9.where_conditions[0].function.row_id == '2'
    assert parsed_9.where_conditions[0].operation == '>'
    assert parsed_9.where_conditions[0].value == '.1'
    whereclause_10 = "WHERE SIMILARITY TO 2 WITH RESPECT TO column_1 > .4"
    parsed_10 = where_clause.parseString(whereclause_10,parseAll=True)
    assert parsed_10.where_conditions[0].function.function_id == 'similarity'
    assert parsed_10.where_conditions[0].function.row_id == '2'
    assert parsed_10.where_conditions[0].function.with_respect_to.column_list[0] == 'column_1'
    assert parsed_10.where_conditions[0].operation == '>'
    assert parsed_10.where_conditions[0].value == '.4'
    # SIMILARITY TO <column> = <value>: the row is identified by a cell value,
    # which may be an int, quoted string, float, or bare word.
    whereclause_11 = "WHERE SIMILARITY TO column_1 = 1 = .5"
    parsed_11 = where_clause.parseString(whereclause_11,parseAll=True)
    assert parsed_11.where_conditions[0].function.function_id == 'similarity'
    assert parsed_11.where_conditions[0].function.column == 'column_1'
    assert parsed_11.where_conditions[0].function.column_value == '1'
    assert parsed_11.where_conditions[0].operation == '='
    assert parsed_11.where_conditions[0].value == '.5'
    whereclause_12 = "WHERE SIMILARITY TO column_1 = 'a' WITH RESPECT TO column_2 > .5"
    parsed_12 = where_clause.parseString(whereclause_12,parseAll=True)
    assert parsed_12.where_conditions[0].function.function_id == 'similarity'
    assert parsed_12.where_conditions[0].function.column == 'column_1'
    assert parsed_12.where_conditions[0].function.column_value == 'a'
    assert parsed_12.where_conditions[0].operation == '>'
    assert parsed_12.where_conditions[0].value == '.5'
    assert parsed_12.where_conditions[0].function.with_respect_to.column_list[0] == 'column_2'
    whereclause_13 = "WHERE SIMILARITY TO column_1 = 1.2 WITH RESPECT TO column_2 > .5"
    parsed_13 = where_clause.parseString(whereclause_13,parseAll=True)
    assert parsed_13.where_conditions[0].function.function_id == 'similarity'
    assert parsed_13.where_conditions[0].function.column == 'column_1'
    assert parsed_13.where_conditions[0].function.column_value == '1.2'
    assert parsed_13.where_conditions[0].operation == '>'
    assert parsed_13.where_conditions[0].value == '.5'
    assert parsed_13.where_conditions[0].function.with_respect_to.column_list[0] == 'column_2'
    whereclause_14 = "WHERE SIMILARITY TO column_1 = a WITH RESPECT TO column_2 > .5"
    parsed_14 = where_clause.parseString(whereclause_14,parseAll=True)
    assert parsed_14.where_conditions[0].function.function_id == 'similarity'
    assert parsed_14.where_conditions[0].function.column == 'column_1'
    assert parsed_14.where_conditions[0].function.column_value == 'a'
    assert parsed_14.where_conditions[0].operation == '>'
    assert parsed_14.where_conditions[0].value == '.5'
    assert parsed_14.where_conditions[0].function.with_respect_to.column_list[0] == 'column_2'
    # With Confidence
    whereclause_15 = "WHERE TYPICALITY > .8 CONF .5"
    parsed_15 = where_clause.parseString(whereclause_15,parseAll=True)
    assert parsed_15.where_conditions[0].conf == '.5'
    whereclause_16 = "WHERE PREDICTIVE PROBABILITY OF column_1 > .1 CONF .5"
    parsed_16 = where_clause.parseString(whereclause_16,parseAll=True)
    assert parsed_16.where_conditions[0].conf == '.5'
    whereclause_17 = "WHERE SIMILARITY TO 2 > .1 CONF .5"
    parsed_17 = where_clause.parseString(whereclause_17,parseAll=True)
    assert parsed_17.where_conditions[0].conf == '.5'
    whereclause_18 = "WHERE SIMILARITY TO 2 WITH RESPECT TO column_1 > .4 CONF .5"
    parsed_18 = where_clause.parseString(whereclause_18,parseAll=True)
    assert parsed_18.where_conditions[0].conf == '.5'
    whereclause_19 = "WHERE SIMILARITY TO column_1 = 1 = .5 CONF .5"
    parsed_19 = where_clause.parseString(whereclause_19,parseAll=True)
    assert parsed_19.where_conditions[0].conf == '.5'
    whereclause_20 = "WHERE SIMILARITY TO column_1 = 'a' WITH RESPECT TO column_2 > .5 CONF .5"
    parsed_20 = where_clause.parseString(whereclause_20,parseAll=True)
    assert parsed_20.where_conditions[0].conf == '.5'
    whereclause_21 = "WHERE SIMILARITY TO column_1 = 1.2 WITH RESPECT TO column_2 > .5 CONF .5"
    parsed_21 = where_clause.parseString(whereclause_21,parseAll=True)
    assert parsed_21.where_conditions[0].conf == '.5'
    whereclause_22 = "WHERE SIMILARITY TO column_1 = a WITH RESPECT TO column_2 > .5 CONF .5"
    parsed_22 = where_clause.parseString(whereclause_22,parseAll=True)
    assert parsed_22.where_conditions[0].conf == '.5'
    # AND
    whereclause_23 = "WHERE column_1 = 'a' AND column_2 >= 3"
    parsed_23 = where_clause.parseString(whereclause_23,parseAll=True)
    assert parsed_23.where_conditions[0].function.column == 'column_1'
    assert parsed_23.where_conditions[1].function.column == 'column_2'
    whereclause_24 = "WHERE TYPICALITY > .8 AND PREDICTIVE PROBABILITY OF column_1 > .1 AND SIMILARITY TO 2 > .1"
    parsed_24 = where_clause.parseString(whereclause_24,parseAll=True)
    assert parsed_24.where_conditions[0].function.function_id == 'typicality'
    assert parsed_24.where_conditions[1].function.function_id == 'predictive probability'
    assert parsed_24.where_conditions[2].function.function_id == 'similarity'
    whereclause_25 = "WHERE TYPICALITY > .8 CONF .4 AND PREDICTIVE PROBABILITY OF column_1 > .1 CONF .6 AND SIMILARITY TO 2 > .1 CONF .5"
    parsed_25 = where_clause.parseString(whereclause_25,parseAll=True)
    assert parsed_25.where_conditions[0].conf == '.4'
    assert parsed_25.where_conditions[1].conf == '.6'
    assert parsed_25.where_conditions[2].conf == '.5'
    # KEY IN <row list> mixes with ordinary conditions and functions.
    whereclause_26 = "WHERE KEY IN row_list_1 AND column_1 = 'a' AND TYPICALITY > .4"
    parsed_26 = where_clause.parseString(whereclause_26,parseAll=True)
    assert parsed_26.where_conditions[0].function.function_id == 'key'
    assert parsed_26.where_conditions[0].value == 'row_list_1'
    assert parsed_26.where_conditions[1].function.column == 'column_1'
    assert parsed_26.where_conditions[2].function.function_id == 'typicality'
def test_key_in_rowlist():
    """KEY IN <row list> parses to a 'key' function whose value is the list name."""
    parsed = key_in_rowlist_clause.parseString("key in row_list_1", parseAll=True)
    assert parsed.function.function_id == "key"
    assert parsed.value == "row_list_1"
def test_basic_select_pyparsing():
    """Basic SELECT statements: star, explicit column lists, PLOT prefix,
    WHERE conditions, and ORDER BY / LIMIT / SAVE TO suffixes."""
    ast = query.parseString("SELECT * FROM table_1", parseAll=True)
    assert ast.statement_id == 'select'
    assert ast.btable == 'table_1'
    assert ast.functions[0].column_id == '*'

    ast = query.parseString("SELECT column_1,column_3 FROM table_1", parseAll=True)
    assert ast.functions[0].column_id == 'column_1'
    assert ast.functions[1].column_id == 'column_3'

    ast = query.parseString("PLOT SELECT column_1 FROM table_1 WHERE column_2 = 3",
                            parseAll=True)
    assert ast.plot == 'plot'
    assert ast.functions[0].column_id == 'column_1'
    assert ast.where_keyword == 'where'
    assert ast.where_conditions[0].value == '3'
    assert ast.where_conditions[0].function.column == 'column_2'
    assert ast.where_conditions[0].operation == '='

    ast = query.parseString(
        "SELECT col_1 FROM table_1 ORDER BY TYPICALITY LIMIT 10 SAVE TO ~/test.txt",
        parseAll=True)
    assert ast.functions[0].column_id == 'col_1'
    assert ast.order_by[0].function.function_id == 'typicality'
    assert ast.limit == '10'
    assert ast.filename == '~/test.txt'
def test_select_functions_pyparsing():
    """Each BQL function form is accepted inside SELECT and maps to the
    expected function_id.

    Bug fix relative to the original version: two different SIMILARITY
    queries were assigned to the same variable (``query_5``), so the
    "SELECT SIMILARITY TO 0 ..." form was never actually parsed (and its
    asserts were duplicated). Both forms are now tested.
    """
    single_function_queries = [
        ("SELECT TYPICALITY FROM table_1", 'typicality'),
        ("SELECT TYPICALITY OF column_1 FROM table_1", 'typicality'),
        ("SELECT PREDICTIVE PROBABILITY OF column_1 FROM table_1", 'predictive probability'),
        ("SELECT PROBABILITY OF column_1 = 4 FROM table_1", 'probability'),
        ("SELECT SIMILARITY TO 0 FROM table_1", 'similarity'),
        ("SELECT SIMILARITY TO column_1 = 4 FROM table_1", 'similarity'),
        ("SELECT DEPENDENCE PROBABILITY WITH column_1 FROM table_1", 'dependence probability'),
        ("SELECT MUTUAL INFORMATION OF column_1 WITH column_2 FROM table_1", 'mutual information'),
        ("SELECT CORRELATION OF column_1 WITH column_2 FROM table_1", 'correlation'),
        ]
    for query_string, expected_function_id in single_function_queries:
        ast = query.parseString(query_string, parseAll=True)
        assert ast.statement_id == 'select'
        assert ast.functions[0].function_id == expected_function_id
    # Multiple functions in one SELECT keep their order.
    ast = query.parseString("SELECT TYPICALITY, PREDICTIVE PROBABILITY OF column_1 FROM table_1",
                            parseAll=True)
    assert ast.statement_id == 'select'
    assert ast.functions[0].function_id == 'typicality'
    assert ast.functions[1].function_id == 'predictive probability'
def test_infer_pyparsing():
    """INFER statements: basic star/column forms, SUMMARIZE prefix, ORDER BY /
    LIMIT / SAVE TO, and every function form with WITH CONFIDENCE / SAMPLES.

    Bug fix relative to the original version: two different SIMILARITY
    queries were assigned to the same variable (``query_5``), so the
    "INFER SIMILARITY TO 0 ..." form was never actually parsed (and its
    asserts were duplicated). Both forms are now tested.
    """
    # INFER mirrors SELECT for the basic forms; keywords are case-insensitive.
    ast = query.parseString("INFER * FROM table_1", parseAll=True)
    assert ast.statement_id == 'infer'
    assert ast.btable == 'table_1'
    assert ast.functions[0].column_id == '*'
    ast = query.parseString("infer column_1,column_3 FROM table_1", parseAll=True)
    assert ast.functions[0].column_id == 'column_1'
    assert ast.functions[1].column_id == 'column_3'
    ast = query.parseString("SUMMARIZE infer column_1 FROM table_1 WHERE column_2 = 3",
                            parseAll=True)
    assert ast.summarize == 'summarize'
    assert ast.functions[0].column_id == 'column_1'
    assert ast.where_keyword == 'where'
    assert ast.where_conditions[0].value == '3'
    assert ast.where_conditions[0].function.column == 'column_2'
    assert ast.where_conditions[0].operation == '='
    ast = query.parseString(
        "infer col_1 FROM table_1 ORDER BY TYPICALITY LIMIT 10 SAVE TO ~/test.txt",
        parseAll=True)
    assert ast.functions[0].column_id == 'col_1'
    assert ast.order_by[0].function.function_id == 'typicality'
    assert ast.limit == '10'
    assert ast.filename == '~/test.txt'
    # Every function form accepts WITH CONFIDENCE .4 WITH 4 SAMPLES.
    single_function_queries = [
        ("INFER TYPICALITY FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'typicality'),
        ("INFER TYPICALITY OF column_1 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'typicality'),
        ("INFER PREDICTIVE PROBABILITY OF column_1 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'predictive probability'),
        ("INFER PROBABILITY OF column_1 = 4 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'probability'),
        ("INFER SIMILARITY TO 0 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'similarity'),
        ("INFER SIMILARITY TO column_1 = 4 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'similarity'),
        ("INFER DEPENDENCE PROBABILITY WITH column_1 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'dependence probability'),
        ("INFER MUTUAL INFORMATION OF column_1 WITH column_2 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'mutual information'),
        ("INFER CORRELATION OF column_1 WITH column_2 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES", 'correlation'),
        ]
    for query_string, expected_function_id in single_function_queries:
        ast = query.parseString(query_string, parseAll=True)
        assert ast.statement_id == 'infer'
        assert ast.functions[0].function_id == expected_function_id
        assert ast.samples == '4'
        assert ast.confidence == '.4'
    # Multiple functions in one INFER keep their order.
    ast = query.parseString(
        "INFER TYPICALITY, PREDICTIVE PROBABILITY OF column_1 FROM table_1 WITH CONFIDENCE .4 WITH 4 SAMPLES",
        parseAll=True)
    assert ast.statement_id == 'infer'
    assert ast.functions[0].function_id == 'typicality'
    assert ast.functions[1].function_id == 'predictive probability'
    assert ast.samples == '4'
    assert ast.confidence == '.4'
def test_simulate_pyparsing():
    """SIMULATE statements: star and column-list forms, TIMES and SAVE TO."""
    ast = query.parseString(
        "SIMULATE * FROM table_1 WHERE column_1 = 4 TIMES 4 SAVE TO ~/test.csv",
        parseAll=True)
    assert ast.statement_id == 'simulate'
    assert ast.functions[0].column_id == '*'
    assert ast.where_keyword == 'where'
    assert ast.times == '4'
    assert ast.filename == '~/test.csv'
    ast = query.parseString(
        "SIMULATE col1,col2 FROM table_1 WHERE column_1 = 4 TIMES 4 SAVE TO ~/test.csv",
        parseAll=True)
    assert ast.functions[0].column_id == 'col1'
    assert ast.functions[1].column_id == 'col2'
    # No WHERE/TIMES/SAVE clauses: must still parse without error.
    query.parseString("SIMULATE col1, col2 FROM table_1", parseAll=True)
def test_estimate_columns_from_pyparsing():
    """ESTIMATE COLUMNS, with all optional clauses and in its bare form."""
    full_form = ("ESTIMATE COLUMNS FROM table_1 WHERE col_1 = 4 "
                 "ORDER BY TYPICALITY LIMIT 10 AS col_list_1")
    ast = query.parseString(full_form, parseAll=True)
    assert ast.statement_id == 'estimate'
    assert ast.btable == 'table_1'
    assert ast.where_keyword == 'where'
    assert ast.where_conditions[0].function.column == 'col_1'
    assert ast.where_conditions[0].value == '4'
    assert ast.order_by[0].function.function_id == 'typicality'
    assert ast.limit == '10'
    assert ast.as_column_list == 'col_list_1'
    # Bare form: just the btable, no optional clauses.
    bare_ast = query.parseString("ESTIMATE COLUMNS FROM table_1", parseAll=True)
    assert bare_ast.statement_id == 'estimate'
    assert bare_ast.btable == 'table_1'
def test_estimate_pairwise_pyparsing():
    """ESTIMATE PAIRWISE over column functions: correlation, dependence
    probability (full clause set), and mutual information."""
    ast = query.parseString("ESTIMATE PAIRWISE CORRELATION WITH col_1 FROM table_1",
                            parseAll=True)
    assert ast.statement_id == 'estimate_pairwise'
    assert ast.functions[0].function_id == 'correlation'
    assert ast.functions[0].with_column == 'col_1'
    assert ast.btable == 'table_1'
    # Full form with FOR, SAVE TO, and SAVE CLUSTERS ... AS.
    ast = query.parseString(
        "ESTIMATE PAIRWISE DEPENDENCE PROBABILITY WITH col_1 FROM table_1 "
        "FOR col_1 SAVE TO file.csv SAVE CLUSTERS WITH THRESHOLD .4 AS col_list_1",
        parseAll=True)
    assert ast.statement_id == 'estimate_pairwise'
    assert ast.functions[0].function_id == 'dependence probability'
    assert ast.functions[0].with_column == 'col_1'
    assert ast.btable == 'table_1'
    assert ast.for_list.asList() == ['col_1']
    assert ast.filename == 'file.csv'
    assert ast.clusters_clause.threshold == '.4'
    assert ast.clusters_clause.as_label == 'col_list_1'
    ast = query.parseString(
        "ESTIMATE PAIRWISE MUTUAL INFORMATION WITH col_1 FROM table_1",
        parseAll=True)
    assert ast.functions[0].function_id == 'mutual information'
def test_estimate_pairwise_row_pyparsing():
    """ESTIMATE PAIRWISE ROW SIMILARITY, with and without FOR / SAVE clauses."""
    ast = query.parseString(
        "ESTIMATE PAIRWISE ROW SIMILARITY FROM table_1 "
        "SAVE CLUSTERS WITH THRESHOLD .4 INTO table_2",
        parseAll=True)
    assert ast.statement_id == 'estimate_pairwise_row'
    assert ast.functions[0].function_id == 'similarity'
    assert ast.btable == 'table_1'
    ast = query.parseString(
        "ESTIMATE PAIRWISE ROW SIMILARITY FROM table_1 FOR a "
        "SAVE TO file.csv SAVE CLUSTERS WITH THRESHOLD .4 AS table_2",
        parseAll=True)
    assert ast.statement_id == 'estimate_pairwise_row'
    assert ast.functions[0].function_id == 'similarity'
    assert ast.btable == 'table_1'
    assert ast.for_list.asList() == ['a']
    assert ast.filename == 'file.csv'
    assert ast.clusters_clause.threshold == '.4'
    assert ast.clusters_clause.as_label == 'table_2'
def test_nested_queries_basic_pyparsing():
    """A parenthesized sub-query is captured verbatim (leading space included)
    and is itself parseable as a SELECT."""
    outer = query.parseString("SELECT * FROM ( SELECT col_1,col_2 FROM table_2)",
                              parseAll=True)
    assert outer.statement_id == 'select'
    assert outer.sub_query == " SELECT col_1,col_2 FROM table_2"
    inner = query.parseString(outer.sub_query, parseAll=True)
    assert inner.statement_id == 'select'
    assert inner.functions[0].column_id == 'col_1'
    assert inner.functions[1].column_id == 'col_2'
def test_master_query_for_parse_errors():
    """Smoke test: every statement below must parse without raising.

    Only successful parsing is checked here, not the parsed content.
    (The loop variable used to be named ``query``, shadowing the module-level
    ``query`` grammar element and then being rebound to the parse result
    inside the loop; it is renamed to avoid that double shadowing.)
    """
    query_list = ["LIST BTABLES",
                  "SHOW SCHEMA FOR table_1",
                  "SHOW SCHEMA FOR table_1",
                  "SHOW DIAGNOSTICS FOR table_1",
                  "SHOW COLUMN LISTS FOR table_1",
                  "SHOW COLUMNS collist FOR table_1",
                  "LOAD MODELS ~/filename.csv INTO table_1",
                  "SAVE MODEL FROM table_1 to filename.pkl.gz",
                  "DROP BTABLE table_1",
                  "DROP MODEL 1 FROM table_1",
                  "DROP MODELS 1,2,6-9 FROM table_1",
                  "UPDATE SCHEMA FOR test_btablE SET col_1 = Categorical,col.2=numerical , col_3 = ignore",
                  "CREATE BTABLE test.btable FROM '~/filenam e.csv'",
                  "CREATE BTABLE test_btable FROM ~/filename.csv",
                  "EXECUTE FILE '/filenam e.bql'",
                  "ANALYZE table_1 FOR 10 ITERATIONS",
                  "ANALYZE table_1 MODELS 1-3, 5-7, 9, 10 FOR 1 ITERATION",
                  "ANALYZE table_1 MODELS 1-3 FOR 1 ITERATION",
                  "SELECT TYPICALITY FROM table_1",
                  "SELECT TYPICALITY OF column_1 FROM table_1",
                  "SELECT PREDICTIVE PROBABILITY OF column_1 FROM table_1",
                  "SELECT PROBABILITY OF column_1 = 4 FROM table_1",
                  "SELECT SIMILARITY TO 0 FROM table_1",
                  "SELECT SIMILARITY TO column_1 = 4 FROM table_1",
                  "SELECT DEPENDENCE PROBABILITY WITH column_1 FROM table_1",
                  "SELECT MUTUAL INFORMATION OF column_1 WITH column_2 FROM table_1",
                  "SELECT CORRELATION OF column_1 WITH column_2 FROM table_1",
                  "SELECT TYPICALITY, PREDICTIVE PROBABILITY OF column_1 FROM table_1",
                  "SELECT SIMILARITY TO 0 WITH RESPECT TO column_1, col2 FROM table_1",
                  "SELECT SIMILARITY TO a = 1 , PROBABILITY OF a = 1 FROM table_1"]
    #SELECT PREDICTIVE PROBABILITY OF a, MUTUAL INFORMATION OF a WITH b, CORRELATION OF a WITH b, DEPENDENCE PROBABILITY OF a WITH b, SIMILARITY TO 0, SIMILARITY TO a = 1, PROBABILITY OF a = 1 FROM table_1
    for query_string in query_list:
        ast = bql_statement.parseString(query_string, parseAll=True)
        assert ast.statement_id != ''
def test_list_btables():
    """LIST BTABLES maps to the list_btables method with no arguments."""
    parsed = bql_statement.parseString('list btables', parseAll=True)
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'list_btables'
    assert args == {}
def test_initialize_models():
    """INITIALIZE N MODELS FOR t maps to initialize_models with an int count."""
    parsed = bql_statement.parseString('initialize 5 models for t', parseAll=True)
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'initialize_models'
    assert args == dict(tablename='t', n_models=5, model_config=None)
def test_create_btable():
    """CREATE BTABLE resolves the csv path relative to the current directory."""
    parsed = bql_statement.parseString('create btable t from fn', parseAll=True)
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'create_btable'
    assert args == dict(tablename='t', cctypes_full=None)
    assert client_dict == dict(csv_path=os.path.join(os.getcwd(), 'fn'))
def test_drop_btable():
    """DROP BTABLE maps to drop_btable with just the table name."""
    parsed = bql_statement.parseString('drop btable t', parseAll=True)
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'drop_btable'
    assert args == dict(tablename='t')
def test_drop_models():
    """DROP MODELS, with and without an explicit model index range."""
    parsed = bql_statement.parseString('drop models from t', parseAll=True)
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'drop_models'
    assert args == dict(tablename='t', model_indices=None)
    parsed = bql_statement.parseString('drop models 2-6 from t')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'drop_models'
    # "2-6" is inclusive at both ends, hence range(2, 7).
    assert args == dict(tablename='t', model_indices=range(2,7))
def test_analyze():
    """ANALYZE variants: model ranges, iterations vs. minutes, kernel choice."""
    def _parse(statement):
        # Shared one-liner: parse the statement and dispatch it.
        return parser.parse_single_statement(bql_statement.parseString(statement))

    method, args, client_dict = _parse('analyze t models 2-6 for 3 iterations')
    assert method == 'analyze'
    assert args == dict(tablename='t', model_indices=range(2,7), iterations=3,
                        seconds=None, ct_kernel=0, background=True)
    method, args, client_dict = _parse('analyze t for 6 iterations')
    assert method == 'analyze'
    assert args == dict(tablename='t', model_indices=None, iterations=6,
                        seconds=None, ct_kernel=0, background=True)
    method, args, client_dict = _parse('analyze t for 7 minutes')
    assert method == 'analyze'
    assert args == dict(tablename='t', model_indices=None, iterations=None,
                        seconds=7*60, ct_kernel=0, background=True)
    method, args, client_dict = _parse('analyze t models 2-6 for 7 minutes')
    assert method == 'analyze'
    assert args == dict(tablename='t', model_indices=range(2,7), iterations=None,
                        seconds=7*60, ct_kernel=0, background=True)
    # "with mh kernel" selects ct_kernel=1 instead of the default 0.
    method, args, client_dict = _parse('analyze t models 2-6 for 7 minutes with mh kernel')
    assert method == 'analyze'
    assert args == dict(tablename='t', model_indices=range(2,7), iterations=None,
                        seconds=7*60, ct_kernel=1, background=True)
def test_load_models():
    """LOAD MODELS puts the pickle path in client_dict, not args."""
    parsed = bql_statement.parseString('load models fn into t')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'load_models'
    assert args == dict(tablename='t')
    assert client_dict == dict(pkl_path='fn')
def test_save_models():
    """SAVE MODELS puts the pickle path in client_dict, not args."""
    parsed = bql_statement.parseString('save models from t to fn')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'save_models'
    assert args == dict(tablename='t')
    assert client_dict == dict(pkl_path='fn')
def test_parse_functions():
    """parse_functions maps each parsed function group to a
    (callable, arguments, is_aggregate) triple, in query order."""
    big_select = "SELECT PREDICTIVE PROBABILITY OF a, MUTUAL INFORMATION OF a WITH b, CORRELATION OF a WITH b, DEPENDENCE PROBABILITY OF a WITH b, SIMILARITY TO 0, SIMILARITY TO a = 1 , PROBABILITY OF a = 1 , probability of b = 1 , TYPICALITY of a, typicality , a , * FROM table_1"
    parsed = bql_statement.parseString(big_select, parseAll=True)
    parsed_queries, query_colnames = parser.parse_functions(
        parsed.functions, M_c=test_M_c, M_c_full=test_M_c_full,
        T=test_T, T_full=test_T_full)
    expected = [
        (functions._predictive_probability, 0, False),
        (functions._mutual_information, (0,1), True),
        (functions._correlation, (0,1), True),
        (functions._dependence_probability, (0,1), True),
        (functions._similarity, (0,None), False),
        (functions._similarity, (0,None), False),
        (functions._probability, (0,'1'), True),
        (functions._probability, (1,1), True),
        (functions._col_typicality, 0, True),
        (functions._row_typicality, True, False),
        (functions._column, (0, None), False),
        (functions._column, (0, None), False),
        (functions._column, (1, None), False),
        ]
    for actual, wanted in zip(parsed_queries, expected):
        assert actual == wanted
def test_select():
    """SELECT statements end-to-end through parser.parse_single_statement.

    Checks tablename, functions, whereclause, limit, order_by, plot,
    modelids, summarize, newtablename, and the FREQ/HIST prefix variants.

    Bug fixes relative to the original version:
      * two statements unpacked into ``methods`` (a typo), so the FREQ and
        HIST parses were never actually checked against ``method``;
      * two ``order_by = [('b', True)]`` lines carried a stray trailing
        comma, making them 1-tuples instead of lists;
      * the HIST case was missing the ``summarize`` assert that every
        sibling case has.
    """
    ##TODO test client_dict
    tablename = 't'
    newtablename = 'newtable'
    functions = function_in_query.parseString('*',parseAll=True)
    whereclause = None
    limit = float('inf')
    order_by = False
    plot = False
    # Plain "select * from t".
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select * from t'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # SUMMARIZE prefix flips only the summarize flag.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('summarize select * from t'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=True)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # Explicit column list.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select a, b, a_b from t'))
    d = dict(tablename=tablename, functions=None, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == 'a'
    assert args['functions'][1].column_id == 'b'
    assert args['functions'][2].column_id == 'a_b'
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # WHERE with AND-chained conditions.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select * from t where a=6 and b = 7'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == '*'
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # LIMIT replaces the default infinite limit.
    limit = 10
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select * from t where a=6 and b = 7 limit 10'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # ORDER BY.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select * from t where a=6 and b = 7 order by b limit 10'))
    order_by = [('b', True)]
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    #assert args['order_by'] == d['order_by'] ##TODO
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # INTO <new table>.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('select * from t where a=6 and b = 7 order by b limit 10 into newtable'))
    order_by = [('b', True)]
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False, newtablename=newtablename)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    #assert args['order_by'] == d['order_by'] ##TODO
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    assert args['newtablename'] == d['newtablename']
    # FREQ prefix.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('freq select a from t'))
    d = dict(tablename=tablename, plot=plot, modelids=None, summarize=False, hist=False, freq=True)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    assert args['freq'] == d['freq']
    assert args['hist'] == d['hist']
    # HIST prefix.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('hist select a from t'))
    d = dict(tablename=tablename, plot=plot, modelids=None, summarize=False, hist=True, freq=False)
    assert method == 'select'
    assert args['tablename'] == d['tablename']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    assert args['freq'] == d['freq']
    assert args['hist'] == d['hist']
def test_infer(): ##TODO
    ##TODO test client_dict
    # Baseline expectations shared by all the INFER parse scenarios below.
    # NOTE: `limit` and `order_by` are mutated part-way through, so the
    # scenarios must stay in this order.
    tablename = 't'
    newtablename = 'newtable'
    functions = function_in_query.parseString('*',parseAll=True)
    whereclause = None
    limit = float('inf')
    order_by = False
    plot = False
    # Plain INFER with a star select-list.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer * from t'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # SUMMARIZE prefix: only the summarize flag should flip.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('summarize infer * from t'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=True)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # Explicit column list instead of '*' (column ids checked directly).
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer a, b, a_b from t'))
    d = dict(tablename=tablename, functions=None, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == 'a'
    assert args['functions'][1].column_id == 'b'
    assert args['functions'][2].column_id == 'a_b'
    assert args['whereclause'] == d['whereclause']
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # WHERE clause with two conjuncts (a=6 AND b=7).
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer * from t where a=6 and b = 7'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == '*'
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # All remaining scenarios carry LIMIT 10.
    limit = 10
    # WHERE plus LIMIT.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer * from t where a=6 and b = 7 limit 10'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    # WHERE, LIMIT and INTO a new table.
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer * from t where a=6 and b = 7 limit 10 into newtable'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False, newtablename=newtablename)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    assert args['order_by'] == d['order_by']
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
    assert args['newtablename'] == d['newtablename']
    # WHERE, ORDER BY and LIMIT (the order_by assertion itself is still TODO).
    order_by = [('b', False)]
    method, args, client_dict = parser.parse_single_statement(bql_statement.parseString('infer * from t where a=6 and b = 7 order by b limit 10'))
    d = dict(tablename=tablename, functions=functions, whereclause=whereclause,
             limit=limit, order_by=order_by, plot=plot, modelids=None, summarize=False)
    assert method == 'infer'
    assert args['tablename'] == d['tablename']
    assert args['functions'][0].column_id == d['functions'][0].column_id
    assert args['whereclause'][0].function.column == 'a'
    assert args['whereclause'][0].value == '6'
    assert args['whereclause'][1].function.column == 'b'
    assert args['whereclause'][1].value == '7'
    assert args['limit'] == d['limit']
    #assert args['order_by'] == d['order_by'] ##TODO
    assert args['plot'] == d['plot']
    assert args['modelids'] == d['modelids']
    assert args['summarize'] == d['summarize']
def test_simulate(): ##TODO
    """Check parsing of SIMULATE statements, with and without INTO."""
    cases = [
        ('simulate * from t times 10', None),
        ('simulate * from t times 10 into newtable', 'newtable'),
    ]
    for query, expected_newtable in cases:
        parsed = bql_statement.parseString(query)
        method, args, client_dict = parser.parse_single_statement(parsed)
        assert method == 'simulate'
        assert args['tablename'] == 't'
        assert args['functions'][0].column_id == '*'
        assert args['summarize'] == False
        assert args['plot'] == False
        assert args['order_by'] == False
        assert args['modelids'] == None
        assert args['newtablename'] == expected_newtable
        assert args['givens'] == None
        assert args['numpredictions'] == 10
        assert client_dict['pairwise'] == False
        assert client_dict['filename'] == None
        assert client_dict['scatter'] == False
    ##TODO more clauses
def test_estimate():
    """Check parsing of a bare ESTIMATE COLUMNS statement."""
    parsed = bql_statement.parseString('estimate columns from t')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'estimate_columns'
    assert client_dict == None
    assert args['functions'][0] == 'column'
    expected = {'tablename': 't',
                'whereclause': None,
                'limit': float('inf'),
                'order_by': False,
                'name': None,
                'modelids': None}
    for key in expected:
        assert args[key] == expected[key]
def test_estimate_pairwise():
    """Check parsing of ESTIMATE PAIRWISE over a column function."""
    parsed = bql_statement.parseString('estimate pairwise correlation from t')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'estimate_pairwise'
    assert client_dict['filename'] == None
    expected = {'tablename': 't',
                'function_name': 'correlation',
                'column_list': None,
                'clusters_name': None,
                'threshold': None,
                'modelids': None}
    for key in expected:
        assert args[key] == expected[key]
def test_estimate_pairwise_row():
    """Check parsing of ESTIMATE PAIRWISE ROW over row similarity."""
    parsed = bql_statement.parseString('estimate pairwise row similarity from t')
    method, args, client_dict = parser.parse_single_statement(parsed)
    assert method == 'estimate_pairwise_row'
    assert client_dict['filename'] == None
    assert args['function'].function_id == 'similarity'
    expected = {'tablename': 't',
                'row_list': None,
                'clusters_name': None,
                'threshold': None,
                'modelids': None}
    for key in expected:
        assert args[key] == expected[key]
def test_disallowed_queries():
    """
    All of these queries should pass the grammar and fail at parser.parse_query
    """
    bad_queries = (
        "select * from test times 10",
        "select * from test save clusters with threshold .5 as test.csv",
        "select * from test given a=5",
        "select * from test with confidence .4",
        "select a conf .4 from test",
        "select a conf .4, b from test",
        "simulate a conf .4 from test times 10",
        "simulate a conf .4, b from test times 10",
        "infer * from test times 10",
        "infer typicality from test",
        "infer * from test with confidence 1.5",
        "simulate typicality from test",
        "infer * from test save clusters with threshold .5 as test.csv",
        "infer * from test given a=5",
        "simulate * from test where a < 4",
        "simulate * from test save clusters with threshold .5 as test.csv",
        "simulate * from test with confidence .4",
        "simulate * from test with 4 samples",
        "simulate * from test",
        "estimate columns from test with confidence .4",
        "estimate columns from test given a=4",
        "estimate columns from test times 10",
        "summarize estimate columns from test",
        "plot estimate columns from test",
        "estimate columns from test save clusters with threshold .5 as test.csv",
        "estimate pairwise correlation from test where a = b",
        "estimate pairwise correlation from test times 10",
        "estimate pairwise correlation from test given a = 5",
        "estimate pairwise correlation from test with confidence .2",
        "estimate pairwise row similarity from test where a = b",
        "estimate pairwise row similarity from test times 10",
        "estimate pairwise row similarity from test given a = 5",
        "estimate pairwise row similarity from test with confidence .2",
        "estimate pairwise row similarity from test where a = b",
    )
    for bad_query in bad_queries:
        # The grammar accepts the string; the semantic pass must reject it.
        parsed = bql_statement.parseString(bad_query, parseAll=True)
        with pytest.raises(AssertionError):
            parser.parse_single_statement(parsed)
def test_old_grammar_fails():
    """
    All of these queries are formerly valid but should not pass the current grammar.
    """
    strings = [
        'update schema for test set a = continuous',
        'update schema for test set a = multinomial',
        # Cyclic is invalid because it should be followed by (min, max) parameters.
        'update schema for test set a = cyclic'
    ]
    for query_string in strings:
        with pytest.raises(ParseException):
            # Result deliberately discarded: only the ParseException matters.
            # (Previously assigned to an unused local `ast`.)
            bql_statement.parseString(query_string, parseAll=True)
def test_label_and_metadata():
    # LABEL COLUMNS FOR <btable> SET <column1 = column-label-1> [, <column-name-2 = column-label-2>, ...]
    parsed = bql_statement.parseString(
        'label columns for test set col_1 = label1, col_2 = "label 2"',
        parseAll=True)
    # Quoted labels keep their embedded spaces; bare labels are taken verbatim.
    expected_pairs = [('col_1', 'label1'), ('col_2', 'label 2')]
    for idx, (column, label) in enumerate(expected_pairs):
        assert parsed.label_clause[idx][0] == column
        assert parsed.label_clause[idx][1] == label
| {
"content_hash": "04ea6d725153ee039c538d70a6471cae",
"timestamp": "",
"source": "github",
"line_count": 1437,
"max_line_length": 278,
"avg_line_length": 57.17466945024356,
"alnum_prop": 0.6580939629990263,
"repo_name": "poppingtonic/BayesDB",
"id": "d601549e2a9317602246d7ac450a85009726362f",
"size": "82965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bayesdb/tests/test_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "515"
},
{
"name": "HTML",
"bytes": "941719"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Python",
"bytes": "698491"
},
{
"name": "Ruby",
"bytes": "1866"
},
{
"name": "Shell",
"bytes": "817"
}
],
"symlink_target": ""
} |
import Tkinter
from PIL import Image, ImageDraw, ImageTk
import Layer
import ChooseFrieze, L_sys_Main, DrawMain, ChooseWallpaper
# This class stores the layers, their thumbnails, and coords
class Meta:
    """Model object for the layer stack: holds each layer's image, its
    thumbnail widget and its canvas coordinates, plus which layer
    currently has focus."""

    def __init__(self, i):
        self.instance = i  # owning Main window (provides .layers frame and .redraw())
        self.im = Image.new('RGBA', (900, 600), 'white')
        self.layers = []        # PIL images, newest first
        self.layerscoords = []  # (x, y) canvas centre per layer, same order
        self.layer_thumb = []   # Layer thumbnail widgets, same order
        self.focus = -1         # index of the focused layer; -1 means none

    # add a layer given an image
    def add_layer(self, im):
        """Insert *im* as the new top layer, give it focus and redraw."""
        if self.focus > -1:  # a previously focused layer exists
            # that layer loses focus
            self.layer_thumb[self.focus].lose_focus()
        # New layer instance
        layer = Layer.Layer(im, self.instance.layers, self)
        # add records; the newest layer always goes first in every list
        self.layers = [im] + self.layers
        self.layerscoords = [(450, 300)] + self.layerscoords
        self.layer_thumb = [layer] + self.layer_thumb
        # add thumbnail
        layer.cv.pack()
        layer.change_focus()  # focus on new layer
        self.focus = self.layer_thumb.index(layer)  # record focus
        self.instance.redraw()  # redraw canvas

    # changes focus layer (on click)
    def change_focus(self, layer):
        """Move focus to *layer*, or to a sensible fallback layer."""
        # BUGFIX: the guard used to be `self.focus < len(...)`, so with
        # focus == -1 it called layer_thumb[-1].lose_focus() and blurred
        # the last layer even though no layer was focused.
        if 0 <= self.focus < len(self.layer_thumb):
            self.layer_thumb[self.focus].lose_focus()  # drop old focus
        if layer in self.layer_thumb:
            self.focus = self.layer_thumb.index(layer)
        elif self.layer_thumb:
            self.layer_thumb[0].change_focus()  # fall back to first layer
            self.focus = 0
        else:  # no remaining layer
            self.focus = -1

    # delete a layer and its thumbnail
    def del_layer(self, layer):
        """Remove *layer* and its thumbnail, refocus and redraw."""
        i = self.layer_thumb.index(layer)
        self.layer_thumb[i].cv.pack_forget()
        self.layer_thumb.pop(i)
        self.layers.pop(i)
        self.layerscoords.pop(i)
        self.change_focus(layer)
        self.instance.redraw()
class Main:
    """Top-level Tkinter window: a 900x600 composition canvas, a
    thumbnail column for layers, and buttons that open the drawing
    tool windows (free drawing, frieze, wallpaper, L-fractal)."""

    def __init__(self):
        # Constructing this class blocks in mainloop() until the window closes.
        self.root = Tkinter.Tk()
        self.root.geometry('1080x680')
        self.meta = Meta(self)  # shared layer model; tool windows add layers to it
        # layout
        self.cmd = Tkinter.Frame(self.root, width=1080, height=60, bg='#dddddd')
        self.cmd.grid()
        self.f2 = Tkinter.Frame(self.root, width=1080, height=620)
        self.f2.grid(row=1,column=0)
        self.cv = Tkinter.Canvas(self.f2, bd=0, highlightthickness=0,
                                 width=900, height=600, bg='white')
        self.cv.grid(row=0,column=0, padx=15, pady=15)
        # mouse bindings implement drag-to-move for the focused layer
        self.cv.bind('<B1-Motion>', self.move_layer)
        self.cv.bind('<Button-1>', self.press)
        self.cv.bind('<ButtonRelease-1>', self.release)
        self.layers = Tkinter.Frame(self.f2, width=150, height=620)
        self.layers.grid(row=0, column=1)
        # drawing choices
        freedraw = Tkinter.Button(self.cmd, text='Add Free Drawing', bg='#cccccc',
                                  relief=Tkinter.FLAT, command=lambda:DrawMain.Draw(0, 0, self.meta))
        freedraw.pack(side=Tkinter.LEFT, padx=5, pady=10)
        frieze = Tkinter.Button(self.cmd, text='Add Frieze', bg='#cccccc',
                                relief=Tkinter.FLAT, command=lambda:ChooseFrieze.ChooseFrieze(self.meta))
        frieze.pack(side=Tkinter.LEFT, padx=5, pady=10)
        wall = Tkinter.Button(self.cmd, text='Add Wallpaper', bg='#cccccc',
                              relief=Tkinter.FLAT, command=lambda: ChooseWallpaper.ChooseWallpaper(self.meta))
        wall.pack(side=Tkinter.LEFT, padx=5, pady=10)
        frac = Tkinter.Button(self.cmd, text='Add L-fractal', bg='#cccccc',
                              relief=Tkinter.FLAT, command=lambda:L_sys_Main.Main(self.meta))
        frac.pack(side=Tkinter.LEFT, padx=5, pady=10)
        self.imcoords = (0,0)  # focused layer's canvas coords at drag start
        self.photos = []       # keeps PhotoImage refs alive (Tkinter requirement)
        self.root.mainloop()

    # redraw canvas
    def redraw(self):
        """Redraw every layer on the canvas at its recorded coordinates."""
        self.photos = []
        self.images = []  # canvas item ids, parallel to meta.layers
        for i in range(len(self.meta.layers)):
            self.photos.append(ImageTk.PhotoImage(self.meta.layers[i]))
            self.images.append(self.cv.create_image(self.meta.layerscoords[i][0],
                                                    self.meta.layerscoords[i][1], image=self.photos[-1]))

    # on press save current pos to calculate offset at move
    def press(self, e):
        """Record the mouse position and the focused layer's coords."""
        if self.meta.layers:
            self.x = e.x
            self.y = e.y
            self.imcoords = self.meta.layerscoords[self.meta.focus]

    # on drag, move layer by offset = change in mouse pos from press
    def move_layer(self, e):
        """Drag handler: move the focused layer's canvas item only;
        the model coords are committed on release."""
        if self.meta.layers:
            dx = e.x - self.x
            dy = e.y - self.y
            self.cv.coords(self.images[self.meta.focus], self.imcoords[0]+dx, self.imcoords[1]+dy)

    # on release save the new coords in meta class
    def release(self, e):
        """Persist the focused layer's new coordinates and redraw."""
        if self.meta.layers:
            self.meta.layerscoords[self.meta.focus] = self.imcoords[0]+e.x - self.x, self.imcoords[1]+e.y - self.y
            self.redraw()
# Launch the application when run as a script (blocks in Tkinter mainloop).
if __name__ == '__main__':
    application = Main()
| {
"content_hash": "c521f5068139207093093d7d205656c7",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 114,
"avg_line_length": 36.75373134328358,
"alnum_prop": 0.5955329949238579,
"repo_name": "mntalateyya/Shapes_Studio",
"id": "684a36308a0ccd0b47aee0ad6f0e58fdaceaa582",
"size": "4925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ShapesStudio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60711"
}
],
"symlink_target": ""
} |
import os
import oslo_messaging as messaging
import requests
from oslo_log import log as logging
from gbpservice.nfp.configurator.agents import agent_base
from gbpservice.nfp.configurator.lib import fw_constants as const
from gbpservice.nfp.configurator.lib import utils as load_driver
from gbpservice.nfp.core import event as nfp_event
LOG = logging.getLogger(__name__)
""" Implements Fwaas response path to Neutron plugin.
Methods of this class are invoked by the FwaasEventHandler class
for sending response from driver to the Fwaas Neutron plugin.
"""
class FwaasRpcSender(agent_base.AgentBaseEventHandler):
    """Implements the FWaaS response path to the Neutron plugin.

    Invoked by the FwaasEventHandler class to send responses from the
    driver back to the FWaaS Neutron plugin.
    """

    def __init__(self, sc, host, drivers, rpcmgr):
        super(FwaasRpcSender, self).__init__(sc, drivers, rpcmgr)
        self.host = host

    def _enqueue(self, agent_info, data):
        """Wraps *data* in the notification envelope and enqueues it."""
        notification = {'resource': agent_info['resource'],
                        'data': data}
        info = {'service_type': const.SERVICE_TYPE,
                'context': agent_info['context']}
        self.notify._notification({'info': info,
                                   'notification': [notification]})

    def set_firewall_status(self, agent_info,
                            firewall_id, status, firewall=None):
        """ Enqueues the response from FwaaS operation to neutron plugin.

        :param context: Neutron context
        :param firewall_id: id of firewall resource
        :param status: ACTIVE/ ERROR
        """
        data = {'firewall_id': firewall_id,
                'host': self.host,
                'status': status,
                'notification_type': 'set_firewall_status',
                'firewall': firewall}
        self._enqueue(agent_info, data)

    def firewall_deleted(self, agent_info, firewall_id, firewall=None):
        """ Enqueues the response from FwaaS operation to neutron plugin.

        :param context: Neutron context
        :param firewall_id: id of firewall resource
        """
        data = {'firewall_id': firewall_id,
                'host': self.host,
                'notification_type': 'firewall_deleted',
                'firewall': firewall}
        self._enqueue(agent_info, data)
""" Implements FWaasRpcManager class which receives requests
from Configurator to Agent.
Methods of this class are invoked by the configurator. Events are
created according to the requests received and enqueued to worker queues.
"""
class FWaasRpcManager(agent_base.AgentBaseRPCManager):
    """Receives firewall RPC requests from the configurator and turns
    them into events on the worker queues."""

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, sc, conf):
        """Instantiates child and parent class objects.

        :param sc: Service Controller object that is used to communicate
        with process model core file.
        :param conf: Configuration object that is used for configuration
        parameter access.
        """
        super(FWaasRpcManager, self).__init__(sc, conf)

    def _create_event(self, context, firewall, host, method):
        """ Creates and enqueues the events to the worker queues.

        :param context: Neutron context
        :param firewall: Firewall resource object from neutron fwaas plugin
        :param host: Name of the host machine
        :param method: CREATE_FIREWALL/UPDATE_FIREWALL/DELETE_FIREWALL
        """
        # REVISIT(mak): How to send large data ?
        # New API required to send over unix sockert ?
        context['service_info'] = {}
        event_data = {'context': context,
                      'firewall': firewall,
                      'host': host}
        event = self.sc.new_event(id=method, data=event_data, key=None)
        self.sc.post_event(event)

    def create_firewall(self, context, firewall, host):
        """Handles a create-firewall request from the configurator."""
        LOG.debug("FwaasRpcReceiver received Create Firewall request.")
        self._create_event(context, firewall, host,
                           const.FIREWALL_CREATE_EVENT)

    def update_firewall(self, context, firewall, host):
        """Handles an update-firewall request from the configurator."""
        LOG.debug("FwaasRpcReceiver received Update Firewall request.")
        self._create_event(context, firewall, host,
                           const.FIREWALL_UPDATE_EVENT)

    def delete_firewall(self, context, firewall, host):
        """Handles a delete-firewall request from the configurator."""
        LOG.debug("FwaasRpcReceiver received Delete Firewall request.")
        self._create_event(context, firewall, host,
                           const.FIREWALL_DELETE_EVENT)
""" Handler class which invokes firewall driver methods
Worker processes dequeue the worker queues and invokes the
appropriate handler class methods for Fwaas methods.
"""
class FWaasEventHandler(object):
    """Worker-side handler: dequeues firewall events and invokes the
    appropriate driver method, reporting the outcome to the plugin."""

    def __init__(self, sc, drivers, rpcmgr, conf):
        """ Instantiates class object.

        :param sc: Service Controller object that is used to communicate
        with process model core file.
        :param drivers: dictionary of driver name to object mapping
        :param rpcmgr: FwaasRpcManager class object
        """
        self.sc = sc
        self.conf = conf
        self.drivers = drivers
        self.host = self.conf.host
        self.rpcmgr = rpcmgr
        self.plugin_rpc = FwaasRpcSender(sc, self.host,
                                         self.drivers, self.rpcmgr)

    def _get_driver(self, service_vendor):
        """ Retrieves driver object given the service type
        """
        driver_id = const.SERVICE_TYPE + service_vendor
        return self.drivers[driver_id]

    def _is_firewall_rule_exists(self, fw):
        """ Checks if firewall rules are present in the request data

        :param fw: Firewall resource object
        """
        return bool(fw['firewall_rule_list'])

    def handle_event(self, ev):
        """ Demultiplexes the firewall request to appropriate
        driver methods.

        :param ev: event object sent from process model event handler
        """
        try:
            msg = ("Worker process with ID: %s starting to "
                   "handle task: %s of type firewall. "
                   % (os.getpid(), ev.id))
            LOG.debug(msg)
            # The context here in ev.data is the neutron context that was
            # renamed to context in the agent_base. This erstwhile
            # neutron context contains the agent info which in turn contains
            # the API context alongside other relevant information like
            # service vendor and type. Agent info is constructed inside
            # the demuxer library.
            service_vendor = ev.data['context']['agent_info']['service_vendor']
            driver = self._get_driver(service_vendor)
            # driver method name matches the lower-cased event id
            self.method = getattr(driver, "%s" % (ev.id.lower()))
            self.invoke_driver_for_plugin_api(ev)
        except Exception as err:
            msg = ("Failed to perform the operation: %s. %s"
                   % (ev.id, str(err).capitalize()))
            LOG.error(msg)

    def invoke_driver_for_plugin_api(self, ev):
        """ Invokes the appropriate driver method and reports the result
        back to the plugin.

        :param ev: event object sent from process model event handler
        """
        context = ev.data['context']
        agent_info = context.pop('agent_info')
        firewall = ev.data.get('firewall')
        host = ev.data.get('host')
        if ev.id == const.FIREWALL_CREATE_EVENT:
            self._create_firewall(context, agent_info, firewall, host)
        elif ev.id == const.FIREWALL_DELETE_EVENT:
            self._delete_firewall(context, agent_info, firewall, host)
        elif ev.id == const.FIREWALL_UPDATE_EVENT:
            self._update_firewall(context, agent_info, firewall, host)
        else:
            msg = ("Wrong call to Fwaas event handler.")
            raise Exception(msg)

    def _create_firewall(self, context, agent_info, firewall, host):
        """Handles FIREWALL_CREATE_EVENT."""
        if not self._is_firewall_rule_exists(firewall):
            # Nothing to configure; report ACTIVE immediately.
            msg = ("Firewall status set to ACTIVE")
            LOG.debug(msg)
            return self.plugin_rpc.set_firewall_status(
                agent_info,
                firewall['id'], const.STATUS_ACTIVE, firewall)
        try:
            status = self.method(context, firewall, host)
        except Exception as err:
            self.plugin_rpc.set_firewall_status(
                agent_info, firewall['id'], const.STATUS_ERROR)
            msg = ("Failed to configure Firewall and status is "
                   "changed to ERROR. %s." % str(err).capitalize())
            LOG.error(msg)
        else:
            self.plugin_rpc.set_firewall_status(
                agent_info, firewall['id'], status, firewall)
            msg = ("Configured Firewall and status set to %s" % status)
            LOG.info(msg)

    def _delete_firewall(self, context, agent_info, firewall, host):
        """Handles FIREWALL_DELETE_EVENT."""
        if not self._is_firewall_rule_exists(firewall):
            # Nothing was configured; report deleted immediately.
            return self.plugin_rpc.firewall_deleted(
                agent_info, firewall['id'], firewall)
        try:
            status = self.method(context, firewall, host)
        except requests.ConnectionError:
            # FIXME It can't be correct everytime
            msg = ("There is a connection error for firewall %r of "
                   "tenant %r. Assuming either there is serious "
                   "issue with VM or data path is completely "
                   "broken. For now marking that as delete."
                   % (firewall['id'], firewall['tenant_id']))
            LOG.warning(msg)
            self.plugin_rpc.firewall_deleted(
                agent_info, firewall['id'], firewall)
        except Exception as err:
            # TODO(VIKASH) Is it correct to raise ? As the subsequent
            # attempt to clean will only re-raise the last one.And it
            # can go on and on and may not be ever recovered.
            self.plugin_rpc.set_firewall_status(
                agent_info, firewall['id'], const.STATUS_ERROR)
            msg = ("Failed to delete Firewall and status is "
                   "changed to ERROR. %s." % str(err).capitalize())
            LOG.error(msg)
            # raise(err)
        else:
            if status == const.STATUS_ERROR:
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], status)
            else:
                msg = ("Firewall %r deleted of tenant: %r" % (
                       firewall['id'], firewall['tenant_id']))
                LOG.info(msg)
                self.plugin_rpc.firewall_deleted(
                    agent_info, firewall['id'], firewall)

    def _update_firewall(self, context, agent_info, firewall, host):
        """Handles FIREWALL_UPDATE_EVENT."""
        if not self._is_firewall_rule_exists(firewall):
            # BUGFIX: firewall['id'] was previously missing from this call,
            # so STATUS_ACTIVE was passed as the firewall id and the
            # firewall object as the status.
            return self.plugin_rpc.set_firewall_status(
                agent_info,
                firewall['id'], const.STATUS_ACTIVE, firewall)
        try:
            status = self.method(context, firewall, host)
        except Exception as err:
            self.plugin_rpc.set_firewall_status(
                agent_info, firewall['id'], const.STATUS_ERROR)
            msg = ("Failed to update Firewall and status is "
                   "changed to ERROR. %s." % str(err).capitalize())
            LOG.error(msg)
        else:
            self.plugin_rpc.set_firewall_status(
                agent_info, firewall['id'], status, firewall)
            msg = ("Updated Firewall and status set to %s" % status)
            LOG.info(msg)
def events_init(sc, drivers, rpcmgr, conf):
    """Registers events with core service controller.

    All the events will come to handle_event method of class instance
    registered in 'handler' field.

    :param drivers: Driver instances registered with the service agent
    :param rpcmgr: Instance to receive all the RPC messages from configurator
    module.

    Returns: None
    """
    # One dedicated FWaasEventHandler instance per event id.
    events = [
        nfp_event.Event(id=event_id,
                        handler=FWaasEventHandler(sc, drivers, rpcmgr, conf))
        for event_id in (const.FIREWALL_CREATE_EVENT,
                         const.FIREWALL_UPDATE_EVENT,
                         const.FIREWALL_DELETE_EVENT)]
    sc.register_events(events)
def load_drivers(conf):
    """Imports all the driver files corresponding to this agent.

    Returns: Dictionary of driver objects with a specified service type and
    vendor name
    """
    utils = load_driver.ConfiguratorUtils()
    driver_map = utils.load_drivers(const.DRIVERS_DIR)
    # Replace each discovered driver class with an instantiated object.
    for key in list(driver_map.keys()):
        driver_map[key] = driver_map[key](conf=conf)
    return driver_map
def register_service_agent(cm, sc, conf, rpcmgr):
    """Registers Fwaas service agent with configurator module.

    :param cm: Instance of configurator module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    :param rpcmgr: Instance containing RPC methods which are invoked by
    configurator module on corresponding RPC message arrival
    """
    cm.register_service_agent(const.SERVICE_TYPE, rpcmgr)
def init_agent(cm, sc, conf):
    """Initializes Fwaas agent.

    :param cm: Instance of configuration module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    """
    # Each stage raises on failure, so reaching the debug log below a
    # try/except implies that stage succeeded.
    try:
        drivers = load_drivers(conf)
    except Exception as e:
        LOG.error("Fwaas failed to load drivers. %s"
                  % (str(e).capitalize()))
        raise Exception(e)
    LOG.debug("Fwaas loaded drivers successfully.")

    rpcmgr = FWaasRpcManager(sc, conf)
    try:
        events_init(sc, drivers, rpcmgr, conf)
    except Exception as e:
        LOG.error("Fwaas Events initialization unsuccessful. %s"
                  % (str(e).capitalize()))
        raise Exception(e)
    LOG.debug("Fwaas Events initialization successful.")

    try:
        register_service_agent(cm, sc, conf, rpcmgr)
    except Exception as e:
        LOG.error("Fwaas service agent registration unsuccessful. %s"
                  % (str(e).capitalize()))
        raise Exception(e)
    LOG.debug("Fwaas service agent registration successful.")

    LOG.info("FIREWALL as a Service Module Initialized.")
def init_agent_complete(cm, sc, conf):
    """ Initializes periodic tasks
    """
    LOG.info(" Firewall agent init complete")
| {
"content_hash": "7cceefa0b2ef9096600750e3e6797943",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 79,
"avg_line_length": 35.4526558891455,
"alnum_prop": 0.5765748159729008,
"repo_name": "jiahaoliang/group-based-policy",
"id": "29b64cce8f98bf3ddecb3b16d5a120cbc5c12e4f",
"size": "15924",
"binary": false,
"copies": "1",
"ref": "refs/heads/lbaasv2-mitaka-pull-request",
"path": "gbpservice/nfp/configurator/agents/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1741199"
},
{
"name": "Shell",
"bytes": "27976"
}
],
"symlink_target": ""
} |
import javabridge
import bioformats
from collections import OrderedDict
import numpy as np
from tqdm import tqdm
from xml.etree import cElementTree as etree
class LIFContainer:
    """Reader for a Leica LIF microscopy file via python-bioformats.

    Parses the OME-XML metadata once at construction and exposes the
    image series, pixel data, ROIs and structured annotations.
    """

    def __init__(self, filename):
        self.filename = filename
        self.metadata_in_xml = bioformats.get_omexml_metadata(filename)
        # Strip the micro sign (u'\xb5', e.g. from um units) -- presumably
        # it breaks downstream XML/text handling; TODO confirm.
        self.metadata_in_xml = self.metadata_in_xml.replace(u'\xb5', ' ')
        self.xml = etree.fromstring(self.metadata_in_xml)
        self.lif_img_data = self.get_image_data()  # series name -> dimension dict
        self.lif_series_order = list(self.lif_img_data.keys())  # names in file order

    def get_image_data(self):
        """Return an OrderedDict mapping each series name to its ID and
        X/Y/C/Z/T dimension sizes from the OME metadata."""
        data = OrderedDict()
        ome_metadata = bioformats.omexml.OMEXML(self.metadata_in_xml)
        for i in range(ome_metadata.image_count):
            image = ome_metadata.image(i)
            data[image.Name] = {
                "ID": image.ID,
                "X": image.Pixels.SizeX,
                "Y": image.Pixels.SizeY,
                "C": image.Pixels.SizeC,
                "Z": image.Pixels.SizeZ,
                "T": image.Pixels.SizeT,
            }
        # sa = ome_metadata.structured_annotations
        return data

    def get_image(self, c=0, t=0, z=0, series_id=0):
        """Read a single 2-D plane (channel c, time t, slice z) from the
        series with index *series_id*.

        NOTE: opens a fresh ImageReader per call; callers looping over
        many planes pay that cost each time.
        """
        with bioformats.ImageReader(self.filename) as reader:
            return reader.read(c=c, t=t, z=z, series=series_id)

    def get_full_array(self, series_name, progressbar=False):
        """Read a whole series into an array allocated as (X, Y, C, T).

        Only the first z-slice is read (get_image defaults to z=0).
        NOTE(review): planes are assigned into an (X, Y)-sized target;
        whether reader.read returns (X, Y) or (Y, X) only matters for
        non-square images -- TODO confirm orientation.
        """
        shape = self.lif_img_data[series_name]
        img = np.zeros(tuple(map(shape.get, ['X', 'Y', 'C', 'T'])))
        current_series_id = self.lif_series_order.index(series_name)
        # Optionally wrap the time loop in a tqdm progress bar.
        if progressbar:
            _time = tqdm(range(shape['T']))
        else:
            _time = range(shape['T'])
        for t in _time:
            for c in range(shape['C']):
                img[:, :, c, t] = self.get_image(t=t, c=c,
                                                 series_id=current_series_id)
        return img

    def get_rois_from_oemxml(self):
        """Collect ROI definitions (labels, polygons, ellipses) from the
        OME-XML tree, keyed by ROI ID.

        NOTE(review): "oemxml" in the name is presumably a typo for
        "omexml"; kept as-is for interface compatibility.
        """
        result = {}
        for elem in self.xml:
            if elem.tag.endswith('ROI'):
                roi = elem
                result[roi.attrib['ID']] = []
                # roi[0] is assumed to be the ROI's shape container
                # (OME 'Union') -- TODO confirm against a sample file.
                for shape in roi[0]:
                    if shape[0].tag.endswith('Label'):
                        attribs = shape[0].attrib
                        result[roi.attrib['ID']].append(
                            {'X': float(attribs['X']),
                             'Y': float(attribs['Y'])})
                    elif shape[0].tag.endswith('Polygon'):
                        points_txt = shape[0].attrib['Points']
                        # "x1,y1 x2,y2 ..." -> array of (x, y) rows.
                        # NOTE: relies on Python 2 map() returning a list;
                        # under Python 3 this builds an array of map objects.
                        points = np.array(
                            [map(float, pair.split(',')) for pair in
                             points_txt.split(' ')])
                        result[roi.attrib['ID']].append({'Polygon': points})
                    elif shape[0].tag.endswith('Ellipse'):
                        result[roi.attrib['ID']].append(
                            {
                                k: float(shape[0].attrib[k])
                                for k in ('RadiusX', 'RadiusY', 'X', 'Y')
                            }
                        )
                        # tags the FIRST entry for this ROI, not the one
                        # just appended -- NOTE(review): looks intentional
                        # only if an ellipse is always the sole shape; confirm.
                        result[roi.attrib['ID']][0].update({'type': 'Ellipse'})
        return result

    def _get_structured_annotations(self):
        """Parse StructuredAnnotations into a nested OrderedDict, using
        '|'-separated key paths as nesting levels."""
        ordered_structure = OrderedDict()
        structured_annotations = next(
            sa for sa in self.xml
            if sa.tag.endswith('StructuredAnnotations'))
        for sa in structured_annotations:
            cur = ordered_structure
            # sa[0][0] is assumed to hold exactly two children, the Key and
            # Value elements, unpacked by iterating it -- TODO confirm.
            key, value = sa[0][0]
            keys = key.text.split('|')
            # Walk/create the nested dicts for all but the last path part.
            for _key in keys[:-1]:
                if _key not in cur:
                    cur[_key] = OrderedDict()
                cur = cur[_key]
            cur[keys[-1]] = value.text
        return ordered_structure

    def get_structured_annotations(self):
        """Group the flat structured annotations by image series name.

        Annotations whose top-level key starts with a series name are
        nested under that series (with the prefix stripped); leftovers
        are collected under the key 'other'.
        """
        full_structured_annotation = OrderedDict()
        original_structured_annotation = self._get_structured_annotations()
        for series_name in self.lif_series_order:
            full_structured_annotation[series_name] = OrderedDict()
            metadata_to_remove = []
            for metadata, sub_dict in original_structured_annotation.items():
                if metadata.startswith(series_name):
                    # strip the '<series_name> ' prefix (+1 for the separator)
                    full_structured_annotation[series_name][
                        metadata[len(series_name) + 1:]] = sub_dict
                    metadata_to_remove.append(metadata)
            # Removal deferred so the dict is not mutated while iterating.
            for metadata in metadata_to_remove:
                original_structured_annotation.pop(metadata, None)
        if original_structured_annotation:
            full_structured_annotation['other'] = \
                original_structured_annotation
        return full_structured_annotation
def start_bioformats():
    """Start the Java VM that backs python-bioformats.

    Must be called once, before constructing any ``LIFContainer``.
    """
    jars = bioformats.JARS
    javabridge.start_vm(class_path=jars)
    print('BioContainer started')
def stop_bioformats():
    """Shut down the Java VM started by ``start_bioformats``.

    After this call python-bioformats can no longer read files in this
    process (javabridge cannot restart a killed VM).
    """
    javabridge.kill_vm()
    print('BioContainer closed')
| {
"content_hash": "5b0fe30cdfffa1add623c1e1f003d5a6",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 39.26190476190476,
"alnum_prop": 0.5215281989084294,
"repo_name": "gangiman/FRAPanalyzer",
"id": "83f5cb3fc004a9f12f40581183b7a3290e2a70d1",
"size": "4971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/FRAPanalyzer/lifproc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "90"
},
{
"name": "Jupyter Notebook",
"bytes": "33038580"
},
{
"name": "Python",
"bytes": "20083"
},
{
"name": "Shell",
"bytes": "303"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
import unittest
import os
import warnings
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen import Element, Structure
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation
from monty.os.path import which
from pymatgen.transformations.site_transformations import \
RemoveSitesTransformation
from pymatgen.util.testing import PymatgenTest
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Created on Jul 22, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 22, 2012"
enum_cmd = which('enum.x') or which('multienum.x')
makestr_cmd = which('makestr.x') or which('makeStr.x') or which('makeStr.py')
enumlib_present = enum_cmd and makestr_cmd
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumlibAdaptorTest(PymatgenTest):
    """Integration tests for EnumlibAdaptor.

    These run the external enumlib binaries (enum.x / makestr.x) and are
    skipped when those executables are not on PATH.
    """

    def test_init(self):
        """Enumerate orderings for several partially disordered inputs
        and check the expected structure counts and compositions."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Half-occupied Li sublattice in LiFePO4.
            # (Uses the module-level test_dir; a redundant local copy of
            # it was removed here.)
            struct = self.get_structure("LiFePO4")
            subtrans = SubstitutionTransformation({'Li': {'Li': 0.5}})
            adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct),
                                     1, 2)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 86)
            for s in structures:
                self.assertAlmostEqual(
                    s.composition.get_atomic_fraction(Element("Li")),
                    0.5 / 6.5)

            # Refining the structure first reduces the ordering count.
            adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct),
                                     1, 2, refine_structure=True)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 52)

            subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
            adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct),
                                     1, 1, refine_structure=True)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 1)
            for s in structures:
                self.assertAlmostEqual(s.composition
                                       .get_atomic_fraction(Element("Li")),
                                       0.25 / 6.25)

            # Make sure it works for completely disordered structures.
            struct = Structure([[10, 0, 0], [0, 10, 0], [0, 0, 10]],
                               [{'Fe': 0.5}], [[0, 0, 0]])
            adaptor = EnumlibAdaptor(struct, 1, 2)
            adaptor.run()
            self.assertEqual(len(adaptor.structures), 3)

            # Make sure it works properly when symmetry is broken by
            # ordered sites.
            struct = self.get_structure("LiFePO4")
            subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
            s = subtrans.apply_transformation(struct)
            # Remove some ordered sites to break symmetry.
            removetrans = RemoveSitesTransformation([4, 7])
            s = removetrans.apply_transformation(s)
            adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 4)

            struct = Structure([[3, 0, 0], [0, 3, 0], [0, 0, 3]],
                               [{"Si": 0.5}] * 2,
                               [[0, 0, 0], [0.5, 0.5, 0.5]])
            adaptor = EnumlibAdaptor(struct, 1, 3,
                                     enum_precision_parameter=0.01)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 10)

            struct = Structure.from_file(
                os.path.join(test_dir, "EnumerateTest.json"))
            adaptor = EnumlibAdaptor(struct, 1, 1)
            adaptor.run()
            structures = adaptor.structures
            self.assertEqual(len(structures), 2)

    def test_partial_disorder(self):
        """Enumerate partial disorder on each garnet sublattice and check
        counts and formulas of the resulting ordered structures."""
        s = Structure.from_file(filename=os.path.join(test_dir, "garnet.cif"))
        a = SpacegroupAnalyzer(s, 0.1)
        prim = a.find_primitive()

        # Disorder on the Al (octahedral) sublattice.
        s = prim.copy()
        s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
        adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
        adaptor.run()
        structures = adaptor.structures
        self.assertEqual(len(structures), 7)
        for s in structures:
            self.assertEqual(s.formula, 'Ca12 Al4 Ga4 Si12 O48')

        # Disorder on the Ca (dodecahedral) sublattice.
        s = prim.copy()
        s["Ca2+"] = {"Ca2+": 1/3, "Mg2+": 2/3}
        adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
        adaptor.run()
        structures = adaptor.structures
        self.assertEqual(len(structures), 20)
        for s in structures:
            self.assertEqual(s.formula, 'Ca4 Mg8 Al8 Si12 O48')

        # Disorder on the Si (tetrahedral) sublattice.
        s = prim.copy()
        s["Si4+"] = {"Si4+": 1/3, "Ge4+": 2/3}
        adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
        adaptor.run()
        structures = adaptor.structures
        self.assertEqual(len(structures), 18)
        for s in structures:
            self.assertEqual(s.formula, 'Ca12 Al8 Si4 Ge8 O48')

    def test_timeout(self):
        """A near-zero timeout must abort enumeration with TimeoutError."""
        s = Structure.from_file(filename=os.path.join(test_dir, "garnet.cif"))
        # (An unused SpacegroupAnalyzer instantiation was removed here.)
        s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
        adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01,
                                 timeout=0.000001)
        self.assertRaises(TimeoutError, adaptor._run_multienum)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "96550e12655feda614dc5bdf57ca0c09",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 83,
"avg_line_length": 42.489208633093526,
"alnum_prop": 0.5765323399932272,
"repo_name": "gpetretto/pymatgen",
"id": "0d6338ffe33c993299cb1b14bec262cb4d09c932",
"size": "6016",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/command_line/tests/test_enumlib_caller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6711108"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.