# === File: tests/test_text.py (repo: bjk7119/fosslight_util, license: Apache-2.0) ===
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0
import os
from fosslight_util.set_log import init_log
from fosslight_util.write_txt import write_txt_file
def main():
output_dir = "test_result/txt"
logger = init_log(os.path.join(output_dir, "log.txt"))
logger.warning("TESTING - writing text file")
success, error_msg = write_txt_file(
os.path.join(output_dir, "test.txt"), "Testing - Writing text in a file.")
logger.warning("Result:" + str(success) + ", error_msg:"+error_msg)
if __name__ == '__main__':
main()
# === File: chat/routing.py (repo: Rutujakadam0204/video_conferencing, license: MIT) ===
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'', consumers.ChatConsumer.as_asgi()),
]
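# A sketch of how these patterns are typically consumed (editor's assumption;
# the project's actual asgi.py is not shown here):
#
#   from channels.routing import ProtocolTypeRouter, URLRouter
#   from chat.routing import websocket_urlpatterns
#
#   application = ProtocolTypeRouter({
#       "websocket": URLRouter(websocket_urlpatterns),
#   })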
# === File: texar/torch/utils/dtypes.py (repo: tanyuqian/texar-pytorch, license: Apache-2.0) ===
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions related to data types.
"""
from typing import Any, Dict, Optional, Union
import numpy as np
import torch
from texar.torch.hyperparams import HParams
__all__ = [
'torch_bool',
'get_numpy_dtype',
'is_str',
'is_callable',
'get_supported_scalar_types',
'maybe_hparams_to_dict',
'compat_as_text',
]
# `torch.bool` exists in PyTorch 1.1, but the default type for comparisons
# is still `torch.uint8`.
torch_bool = (torch.empty(()) < 0).dtype
DTYPE_MAP = {
np.float32: ['float32', 'float', 'tf.float32', 'torch.float',
'torch.float32', float, np.float32, torch.float32],
np.float64: ['float64', 'tf.float64', 'torch.float64', np.float64,
np.float_, torch.float64],
np.float16: ['float16', 'tf.float16', 'torch.float16', np.float16,
torch.float16],
np.int32: ['int', 'int32', 'tf.int32', 'torch.int', 'torch.int32', int,
np.int32, torch.int32],
np.int64: ['int64', 'tf.int64', 'torch.int64', np.int64, np.int_,
torch.int64],
np.int16: ['int16', 'tf.int16', 'torch.int16', np.int16, torch.int16],
np.int8: ['int8', 'char', 'tf.int8', 'torch.int8', np.int8, torch.int8],
np.uint8: ['uint8', 'tf.uint8', 'torch.uint8', np.uint8, torch.uint8],
np.bool_: ['bool', 'tf.bool', 'torch.bool', bool, np.bool_,
torch_bool],
np.str_: ['string', 'str', 'tf.string', str, np.str_],
np.bytes_: ['bytes', 'np.bytes', bytes, np.bytes_]
}
def get_numpy_dtype(dtype: Union[str, type]):
r"""Returns equivalent NumPy dtype.
Args:
dtype: A str, Python numeric or string type, NumPy data type, or
PyTorch dtype.
Returns:
The corresponding NumPy dtype.
"""
for np_dtype, valid_values in DTYPE_MAP.items():
if dtype in valid_values:
return np_dtype
raise ValueError(
f"Unsupported conversion from type {dtype!s} to NumPy dtype")
def is_callable(x):
r"""Return `True` if :attr:`x` is callable.
"""
return callable(x)
def is_str(x):
r"""Returns `True` if :attr:`x` is either a str or unicode.
Returns `False` otherwise.
"""
return isinstance(x, str)
def get_supported_scalar_types():
r"""Returns a list of scalar types supported.
"""
types = []
for key, value in DTYPE_MAP.items():
if key not in {np.str_, np.bytes_}:
types.extend(value)
return types
def maybe_hparams_to_dict(hparams: Optional[Union[HParams, Dict[str, Any]]]) \
-> Optional[Dict[str, Any]]:
r"""If :attr:`hparams` is an instance of :class:`~texar.torch.HParams`,
converts it to a ``dict`` and returns. If :attr:`hparams` is a ``dict``,
returns as is.
Args:
hparams: The :class:`~texar.torch.HParams` instance to convert.
Returns:
dict: The corresponding ``dict`` instance
"""
if hparams is None:
return None
if isinstance(hparams, dict):
return hparams
return hparams.todict()
def _maybe_list_to_array(str_list, dtype_as):
if isinstance(dtype_as, (list, tuple)):
return type(dtype_as)(str_list)
elif isinstance(dtype_as, np.ndarray):
return np.array(str_list)
else:
return str_list
def _as_text(bytes_or_text, encoding='utf-8'):
r"""Returns the given argument as a unicode string.
Adapted from ``tensorflow.compat.as_text``.
Args:
bytes_or_text: A ``bytes``, ``str``, or ``unicode`` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A ``unicode`` (Python 2) or ``str`` (Python 3) object.
Raises:
TypeError: If ``bytes_or_text`` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, str):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError(
f"Expected binary or unicode string, got {bytes_or_text!r}")
def compat_as_text(str_):
r"""Converts strings into ``unicode`` (Python 2) or ``str`` (Python 3).
Args:
str\_: A string or other data types convertible to string, or an
`n`-D numpy array or (possibly nested) list of such elements.
Returns:
The converted strings of the same structure/shape as :attr:`str_`.
"""
def _recur_convert(s):
if isinstance(s, (list, tuple, np.ndarray)):
s_ = [_recur_convert(si) for si in s]
return _maybe_list_to_array(s_, s)
else:
try:
return _as_text(s)
except TypeError:
return _as_text(str(s))
text = _recur_convert(str_)
return text
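# Editor's sketch: a quick self-check of `compat_as_text` (runs only when the
# module is executed directly; the expected values are assumptions checked
# against the logic above, not part of the original module).
if __name__ == "__main__":
    assert compat_as_text(b"abc") == "abc"
    # Nested structures are converted element-wise, preserving the container:
    assert compat_as_text([b"abc", ("def", 123)]) == ["abc", ("def", "123")]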
# === File: Shapes.py (repo: Mandar-Sharma/Recon, license: MIT) ===
__author__ = 'Mandar'
import pygame
import numpy as np
import math as m
from pygame import gfxdraw
import Vertices
import copy
class Creates(object):
def __init__(self):
pass
    # Using Bresenham's line algorithm
def line(self, surface, X1, Y1, X2, Y2, color):
x1 = copy.copy(X1)
        y1 = copy.copy(Y1)
x2 = copy.copy(X2)
y2 = copy.copy(Y2)
gfxdraw.pixel(surface, x1, y1, color)
dx = abs(x2 - x1)
dy = abs(y2 - y1)
if (x2 > x1):
xinc = 1
else:
xinc = -1
if (y2 > y1):
yinc = 1
else:
yinc = -1
x1t = x1
y1t = y1
if (dx > dy):
p = 2 * dy - dx
k = 0
while k <= dx:
x1t += xinc
if p < 0:
p += 2 * dy
else:
y1t += yinc
p += 2 * dy - 2 * dx
gfxdraw.pixel(surface, x1t, y1t, color)
k += 1
else:
p = 2 * dx - dy
k = 0
while k <= dy:
y1t += yinc
if p < 0:
p += 2 * dx
else:
x1t += xinc
p += 2 * dx - 2 * dy
gfxdraw.pixel(surface, x1t, y1t, color)
k += 1
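# Illustrative usage (editor's sketch; assumes a pygame display has been set up):
#   surface = pygame.display.set_mode((200, 200))
#   Creates().line(surface, 10, 10, 150, 90, pygame.Color(255, 255, 255))
#   pygame.display.flip()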
# === File: sib_api_v3_sdk/models/create_list.py (repo: Danilka/APIv3-python-library, license: MIT) ===
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreateList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'folder_id': 'int'
}
attribute_map = {
'name': 'name',
'folder_id': 'folderId'
}
def __init__(self, name=None, folder_id=None): # noqa: E501
"""CreateList - a model defined in Swagger""" # noqa: E501
self._name = None
self._folder_id = None
self.discriminator = None
self.name = name
self.folder_id = folder_id
@property
def name(self):
"""Gets the name of this CreateList. # noqa: E501
Name of the list # noqa: E501
:return: The name of this CreateList. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateList.
Name of the list # noqa: E501
:param name: The name of this CreateList. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def folder_id(self):
"""Gets the folder_id of this CreateList. # noqa: E501
Id of the parent folder in which this list is to be created # noqa: E501
:return: The folder_id of this CreateList. # noqa: E501
:rtype: int
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this CreateList.
Id of the parent folder in which this list is to be created # noqa: E501
:param folder_id: The folder_id of this CreateList. # noqa: E501
:type: int
"""
if folder_id is None:
raise ValueError("Invalid value for `folder_id`, must not be `None`") # noqa: E501
self._folder_id = folder_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
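# Illustrative round-trip (editor's sketch):
#   >>> CreateList(name="newsletter", folder_id=12).to_dict()
#   {'name': 'newsletter', 'folder_id': 12}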
# === File: django_check_db/_version.py (repo: mintel/django-check-db, license: MIT) ===
# noqa
__title__ = 'django-check-db'
__version__ = '0.1.0-dev'
__summary__ = 'Django management command to test database connections'
__author__ = 'Jaye Doepke'
__author_email__ = 'jdoepke@mintel.com'
__uri__ = 'https://github.com/mintel/django-check-db'
# === File: devday/talk/migrations/0043_auto_20200310_1737.py (repo: jenslauterbach/devday_website, licenses: CC-BY-4.0, BSD-3-Clause) ===
# Generated by Django 2.2.10 on 2020-03-10 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('talk', '0042_delete_attendee_feedback_with_attendee'),
]
operations = [
migrations.AlterField(
model_name='attendeefeedback',
name='attendee',
field=models.ForeignKey(blank=True, limit_choices_to={'event__published': True}, null=True, on_delete=django.db.models.deletion.CASCADE, to='attendee.Attendee', verbose_name='Attendee'),
),
migrations.AlterField(
model_name='attendeefeedback',
name='talk',
field=models.ForeignKey(limit_choices_to={'track__is_null': False}, on_delete=django.db.models.deletion.CASCADE, to='talk.Talk', verbose_name='Talk'),
),
migrations.AlterField(
model_name='attendeevote',
name='attendee',
field=models.ForeignKey(limit_choices_to={'event__published': True}, on_delete=django.db.models.deletion.CASCADE, to='attendee.Attendee', verbose_name='Attendee'),
),
migrations.AlterField(
model_name='attendeevote',
name='talk',
field=models.ForeignKey(limit_choices_to={'track__is_null': False}, on_delete=django.db.models.deletion.CASCADE, to='talk.Talk', verbose_name='Talk'),
),
migrations.AlterField(
model_name='sessionreservation',
name='attendee',
field=models.ForeignKey(limit_choices_to={'event__published': True}, on_delete=django.db.models.deletion.CASCADE, to='attendee.Attendee', verbose_name='Attendee'),
),
migrations.AlterField(
model_name='sessionreservation',
name='talk',
field=models.ForeignKey(limit_choices_to={'spots__gt': 0}, on_delete=django.db.models.deletion.CASCADE, to='talk.Talk', verbose_name='Talk'),
),
]
# === File: Telegram/Telegram/settings.py (repo: fgetwewr/Telegram, license: Apache-2.0) ===
# -*- coding: utf-8 -*-
# Scrapy settings for Telegram project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Telegram'
SPIDER_MODULES = ['Telegram.spiders']
NEWSPIDER_MODULE = 'Telegram.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
# 'Telegram.middlewares.TelegramSpiderMiddleware': 543,
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None
}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'Telegram.middlewares.TelegramDownloaderMiddleware': 543,
# 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None
# 'scrapy.downloadermiddleware.httpproxy.HttpProxyMiddleware': None,
# 'Telegram.middlewares.ProxyMiddleWare': 110,
    # Disable Scrapy's automatic retry behaviour (opt out of RetryMiddleware)
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None
# 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
# 'scrapy.downloadermiddlewares.redirect': None,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'Telegram.middlewares.RandomUserAgentMiddleware': 400,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
# 'Telegram.pipelines.TelegramPipeline': 300,
# 'Telegram.pipelines.TelegramtxtPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
HTTPERROR_ALLOWED_CODES = [302]
# MySQL database connection parameters
MYSQL_HOST = '192.168.52.110'
MYSQL_PORT = 3306
MYSQL_USER = 'superman'
MYSQL_PASSWORD = '123456'
MYSQL_DATABASE = 'tg'
MY_USER_AGENT = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
]
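# For reference, a minimal sketch of what the `RandomUserAgentMiddleware`
# enabled above might look like (editor's assumption; the real implementation
# lives in Telegram/middlewares.py and is not shown in this file):
#
#   import random
#
#   class RandomUserAgentMiddleware(object):
#       def __init__(self, user_agents):
#           self.user_agents = user_agents
#
#       @classmethod
#       def from_crawler(cls, crawler):
#           # MY_USER_AGENT is read from this settings module.
#           return cls(user_agents=crawler.settings.getlist('MY_USER_AGENT'))
#
#       def process_request(self, request, spider):
#           request.headers['User-Agent'] = random.choice(self.user_agents)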
# === File: neqsim/process/processTools.py (repo: asmfstatoil/neqsimpython, license: Apache-2.0) ===
import jpype
import jpype.imports
from jpype.types import *
from neqsim.neqsimpython import jNeqSim
processoperations = jNeqSim.processSimulation.processSystem.ProcessSystem()
def stream(thermoSystem, name="stream ?", t=0, p=0):
if t != 0:
thermoSystem.setTemperature(t)
if p != 0:
thermoSystem.setPressure(p)
stream = jNeqSim.processSimulation.processEquipment.stream.Stream(thermoSystem)
stream.setName(name)
processoperations.add(stream)
return stream
def neqstream(thermoSystem, name="stream ?", t=0, p=0):
if t != 0:
thermoSystem.setTemperature(t)
if p != 0:
thermoSystem.setPressure(p)
stream = jNeqSim.processSimulation.processEquipment.stream.NeqStream(thermoSystem)
stream.setName(name)
processoperations.add(stream)
return stream
def recycle(teststream, name="recycle ?"):
    recycle1 = jNeqSim.processSimulation.processEquipment.util.Recycle()
    recycle1.setName(name)
    recycle1.addStream(teststream)
    processoperations.add(recycle1)
    return recycle1
def saturator(teststream, name="water saturator"):
    streamsaturator = jNeqSim.processSimulation.processEquipment.util.StreamSaturatorUtil(teststream)
    streamsaturator.setName(name)
    processoperations.add(streamsaturator)
    return streamsaturator
def glycoldehydrationlmodule(teststream, name="TEG process"):
dehydrationlmodule = jNeqSim.processSimulation.processSystem.processModules.GlycolDehydrationlModule()
dehydrationlmodule.setName(name)
dehydrationlmodule.addInputStream("gasStreamToAbsorber", teststream)
processoperations.add(dehydrationlmodule)
return dehydrationlmodule
def openprocess(filename):
processoperations = jNeqSim.processSimulation.processSystem.ProcessSystem.open(filename)
return processoperations
def separator(teststream, name="separator ?"):
separator = jNeqSim.processSimulation.processEquipment.separator.Separator(
teststream)
separator.setName(name)
processoperations.add(separator)
return separator
def GORfitter(teststream, name="GOR fitter ?"):
GORfitter1 = jNeqSim.processSimulation.processEquipment.util.GORfitter(name, teststream)
GORfitter1.setName(name)
processoperations.add(GORfitter1)
return GORfitter1
def simpleTEGAbsorber(name="TEG absorber ?"):
absorber = jNeqSim.processSimulation.processEquipment.absorber.SimpleTEGAbsorber()
absorber.setName(name)
processoperations.add(absorber)
return absorber
def waterStripperColumn(name="water stripper ?"):
stripper = jNeqSim.processSimulation.processEquipment.absorber.WaterStripperColumn()
stripper.setName(name)
processoperations.add(stripper)
return stripper
def gasscrubber(teststream, name="scrubber ?"):
separator = jNeqSim.processSimulation.processEquipment.separator.GasScrubber(
teststream)
separator.setName(name)
processoperations.add(separator)
return separator
def separator3phase(teststream, name="separator ?"):
separator = jNeqSim.processSimulation.processEquipment.separator.ThreePhaseSeparator(teststream)
separator.setName(name)
processoperations.add(separator)
return separator
def valve(teststream, p=1.0, name="valve ?"):
valve = jNeqSim.processSimulation.processEquipment.valve.ThrottlingValve(
teststream)
valve.setOutletPressure(p)
valve.setName(name)
processoperations.add(valve)
return valve
def recycle2(name="recycle ?"):
recyc = jNeqSim.processSimulation.processEquipment.util.Recycle(name)
processoperations.add(recyc)
return recyc
def calculator(name="calculator ?"):
calc2 = jNeqSim.processSimulation.processEquipment.util.Calculator(name)
processoperations.add(calc2)
return calc2
def setpoint(name1, unit1, name2, unit2):
setp = jNeqSim.processSimulation.processEquipment.util.SetPoint(name1, unit1, name2, unit2)
processoperations.add(setp)
return setp
def filters(teststream):
filter2 = jNeqSim.processSimulation.processEquipment.filter.Filter(
teststream)
processoperations.add(filter2)
return filter2
def compressor(teststream, pres=10.0, name="compressor ?"):
compressor = jNeqSim.processSimulation.processEquipment.compressor.Compressor(teststream)
compressor.setOutletPressure(pres)
compressor.setName(name)
processoperations.add(compressor)
return compressor
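# Illustrative wiring of the helpers above (editor's sketch; the fluid and all
# numbers are invented, and the thermo API call is an assumption):
#   fluid = jNeqSim.thermo.system.SystemSrkEos(298.15, 10.0)
#   fluid.addComponent("methane", 1.0)
#   inlet = stream(fluid, name="inlet gas")
#   comp = compressor(inlet, pres=30.0, name="1st stage compressor")
#   run()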
def compressorChart(compressor, curveConditions, speed, flow, head, polyEff ):
compressor.getCompressorChart().setCurves(JDouble[:](curveConditions), JDouble[:](speed), JDouble[:][:](flow), JDouble[:][:](head), JDouble[:][:](polyEff))
def compressorSurgeCurve(compressor, curveConditions, surgeflow, surgehead):
compressor.getCompressorChart().getSurgeCurve().setCurve(
JDouble[:](curveConditions), JDouble[:](surgeflow), JDouble[:](surgehead))
def compressorStoneWallCurve(compressor, curveConditions, stoneWallflow, stoneWallHead):
compressor.getCompressorChart().getStoneWallCurve().setCurve(JDouble[:](curveConditions), JDouble[:](stoneWallflow), JDouble[:](stoneWallHead))
def pump(teststream, p=1.0, name="pump ?"):
pump = jNeqSim.processSimulation.processEquipment.pump.Pump(teststream)
pump.setOutletPressure(p)
pump.setName(name)
processoperations.add(pump)
return pump
def expander(teststream, p, name="expander ?"):
expander = jNeqSim.processSimulation.processEquipment.expander.Expander(teststream)
expander.setOutletPressure(p)
expander.setName(name)
processoperations.add(expander)
return expander
def mixer(name=""):
mixer = jNeqSim.processSimulation.processEquipment.mixer.StaticMixer()
mixer.setName(name)
processoperations.add(mixer)
return mixer
def phasemixer(name=""):
mixer = jNeqSim.processSimulation.processEquipment.mixer.StaticPhaseMixer()
mixer.setName(name)
processoperations.add(mixer)
return mixer
def nequnit(teststream, equipment="pipeline", flowpattern="stratified", numberOfNodes=100):
neqUn = jNeqSim.processSimulation.processEquipment.util.NeqSimUnit(teststream, equipment, flowpattern)
neqUn.setNumberOfNodes(numberOfNodes)
processoperations.add(neqUn)
return neqUn
def splitter(teststream, splitfactors, name=""):
splitter = jNeqSim.processSimulation.processEquipment.splitter.Splitter(
teststream)
splitter.setSplitNumber(len(splitfactors))
splitter.setSplitFactors(JDouble[:](splitfactors))
splitter.setName(name)
processoperations.add(splitter)
return splitter
def heater(teststream, name=""):
heater = jNeqSim.processSimulation.processEquipment.heatExchanger.Heater(teststream)
heater.setName(name)
processoperations.add(heater)
return heater
def simplereservoir(fluid, name="Reservoir 1", gasvolume=10.0 * 1e7, oilvolume=120.0 * 1e6, watervolume=10.0e6):
reserv = jNeqSim.processSimulation.processEquipment.reservoir.SimpleReservoir(
name)
reserv.setReservoirFluid(fluid, gasvolume, oilvolume, watervolume)
processoperations.add(reserv)
return reserv
def cooler(teststream, name=""):
cooler = jNeqSim.processSimulation.processEquipment.heatExchanger.Cooler(teststream)
cooler.setName(name)
processoperations.add(cooler)
return cooler
def heatExchanger(stream1, stream2=None, name=""):
    if stream2 is None:
heater = jNeqSim.processSimulation.processEquipment.heatExchanger.HeatExchanger(
stream1)
else:
heater = jNeqSim.processSimulation.processEquipment.heatExchanger.HeatExchanger(
stream1, stream2)
heater.setName(name)
processoperations.add(heater)
return heater
def distillationColumn(trays=5, reboil=True, condenser=True, name="destColumn"):
distillationColumn = jNeqSim.processSimulation.processEquipment.distillation.DistillationColumn(trays, reboil, condenser)
distillationColumn.setName(name)
processoperations.add(distillationColumn)
return distillationColumn
def neqheater(teststream, name=""):
neqheater = jNeqSim.processSimulation.processEquipment.heatExchanger.NeqHeater(
teststream)
neqheater.setName(name)
processoperations.add(neqheater)
return neqheater
def twophasepipe(teststream, position, diameter, height, outTemp, rough):
pipe = jNeqSim.processSimulation.processEquipment.pipeline.TwoPhasePipeLine(teststream)
pipe.setOutputFileName("c:/tempNew20.nc")
pipe.setInitialFlowPattern("annular")
numberOfLegs = len(position) - 1
numberOfNodesInLeg = 60
pipe.setNumberOfLegs(numberOfLegs)
pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
    # Wrap the Python lists as Java double[] via JPype, matching pipeline() below.
    pipe.setLegPositions(JDouble[:](position))
    pipe.setHeightProfile(JDouble[:](height))
    pipe.setPipeDiameters(JDouble[:](diameter))
    pipe.setPipeWallRoughness(JDouble[:](rough))
    pipe.setOuterTemperatures(JDouble[:](outTemp))
pipe.setEquilibriumMassTransfer(0)
pipe.setEquilibriumHeatTransfer(1)
processoperations.add(pipe)
return pipe
def pipe(teststream, length, deltaElevation, diameter, rough):
pipe = jNeqSim.processSimulation.processEquipment.pipeline.AdiabaticPipe(
teststream)
pipe.setDiameter(diameter)
pipe.setLength(length)
pipe.setPipeWallRoughness(rough)
pipe.setInletElevation(0.0)
pipe.setOutletElevation(deltaElevation)
processoperations.add(pipe)
return pipe
def pipeline(teststream, position, diameter, height, outTemp, rough, outerHeatTransferCoefficients, pipeWallHeatTransferCoefficients, numberOfNodesInLeg = 50):
pipe = jNeqSim.processSimulation.processEquipment.pipeline.OnePhasePipeLine(teststream)
pipe.setOutputFileName("c:/tempNew20.nc")
numberOfLegs = len(position) - 1
pipe.setNumberOfLegs(numberOfLegs)
pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
pipe.setLegPositions(JDouble[:](position))
pipe.setHeightProfile(JDouble[:](height))
pipe.setPipeDiameters(JDouble[:](diameter))
pipe.setPipeWallRoughness(JDouble[:](rough))
pipe.setPipeOuterHeatTransferCoefficients(JDouble[:](outerHeatTransferCoefficients))
pipe.setPipeWallHeatTransferCoefficients(JDouble[:](pipeWallHeatTransferCoefficients))
pipe.setOuterTemperatures(JDouble[:](outTemp))
processoperations.add(pipe)
return pipe
def clear():
processoperations.clearAll()
def run():
processoperations.run()
def clearProcess():
processoperations.clearAll()
def runProcess():
processoperations.run()
def runProcessAsThread(process):
    # JClass (not JPackage) resolves a Java class; start() runs the process
    # asynchronously on the new thread, which is what the name promises.
    Thread = jpype.JClass('java.lang.Thread')
    threadProcess = Thread(process)
    threadProcess.start()
    return threadProcess
def getProcess():
return processoperations
def runtrans():
processoperations.runTransient()
def view():
processoperations.displayResult()
def viewProcess():
processoperations.displayResult()
# === File: vispy/app/backends/_wx.py (repo: sevas/vispy, license: BSD-3-Clause) ===
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy backend for wxPython.
"""
from __future__ import division
from time import sleep
import gc
import warnings
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util import keys, logger
from ...util.ptime import time
from ... import config
USE_EGL = config['gl_backend'].lower().startswith('es')
# -------------------------------------------------------------------- init ---
try:
# avoid silly locale warning on OSX
with warnings.catch_warnings(record=True):
import wx
from wx import glcanvas
from wx.glcanvas import GLCanvas
# Map native keys to vispy keys
KEYMAP = {
wx.WXK_SHIFT: keys.SHIFT,
wx.WXK_CONTROL: keys.CONTROL,
wx.WXK_ALT: keys.ALT,
wx.WXK_WINDOWS_MENU: keys.META,
wx.WXK_LEFT: keys.LEFT,
wx.WXK_UP: keys.UP,
wx.WXK_RIGHT: keys.RIGHT,
wx.WXK_DOWN: keys.DOWN,
wx.WXK_PAGEUP: keys.PAGEUP,
wx.WXK_PAGEDOWN: keys.PAGEDOWN,
wx.WXK_INSERT: keys.INSERT,
wx.WXK_DELETE: keys.DELETE,
wx.WXK_HOME: keys.HOME,
wx.WXK_END: keys.END,
wx.WXK_ESCAPE: keys.ESCAPE,
wx.WXK_BACK: keys.BACKSPACE,
wx.WXK_F1: keys.F1,
wx.WXK_F2: keys.F2,
wx.WXK_F3: keys.F3,
wx.WXK_F4: keys.F4,
wx.WXK_F5: keys.F5,
wx.WXK_F6: keys.F6,
wx.WXK_F7: keys.F7,
wx.WXK_F8: keys.F8,
wx.WXK_F9: keys.F9,
wx.WXK_F10: keys.F10,
wx.WXK_F11: keys.F11,
wx.WXK_F12: keys.F12,
wx.WXK_SPACE: keys.SPACE,
wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN
wx.WXK_NUMPAD_ENTER: keys.ENTER,
wx.WXK_TAB: keys.TAB,
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
class GLCanvas(object):
pass
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
else:
available, testable, why_not = True, True, None
which = 'wxPython ' + str(wx.__version__)
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs
# ------------------------------------------------------------- application ---
_wx_app = None
_timers = []
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
        # inspired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
# Get native app in save way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Show()
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
# Set positionof the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def _vispy_get_fullscreen(self):
return self._fullscreen
def _vispy_set_fullscreen(self, fullscreen):
if self._frame is not None:
self._fullscreen = bool(fullscreen)
self._vispy_set_visible(True)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
self.Show(visible)
if visible:
if self._frame is not None:
self._frame.ShowFullScreen(self._fullscreen)
def _vispy_update(self):
# Invoke a redraw
self.Refresh()
def _vispy_close(self):
if self._vispy_canvas is None:
return
# Force the window or widget to shut down
canvas = self
frame = self._frame
self._gl_context = None # let RC destroy this in case it's shared
canvas.Close()
canvas.Destroy()
if frame:
frame.Close()
frame.Destroy()
gc.collect() # ensure context gets destroyed if it should be
def _vispy_get_size(self):
if self._vispy_canvas is None:
return
w, h = self.GetClientSize()
return w, h
def _vispy_get_position(self):
if self._vispy_canvas is None:
return
x, y = self.GetPosition()
return x, y
def on_close(self, evt):
if not self: # wx control evaluates to false if C++ part deleted
return
if self._vispy_canvas is None:
return
self._vispy_canvas.close()
def on_mouse_event(self, evt):
if self._vispy_canvas is None:
return
pos = (evt.GetX(), evt.GetY())
mods = _get_mods(evt)
if evt.GetWheelRotation() != 0:
delta = (0., float(evt.GetWheelRotation())/120.0)
self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos,
modifiers=mods)
elif evt.Moving() or evt.Dragging(): # mouse move event
self._vispy_mouse_move(pos=pos, modifiers=mods)
elif evt.ButtonDown():
if evt.LeftDown():
button = 1
elif evt.MiddleDown():
button = 3
elif evt.RightDown():
button = 2
            else:
                evt.Skip()
                return  # unknown button: avoid falling through with `button` unset
self._vispy_mouse_press(pos=pos, button=button, modifiers=mods)
elif evt.ButtonUp():
if evt.LeftUp():
button = 1
elif evt.MiddleUp():
button = 3
elif evt.RightUp():
button = 2
            else:
                evt.Skip()
                return  # unknown button: avoid falling through with `button` unset
self._vispy_mouse_release(pos=pos, button=button, modifiers=mods)
elif evt.ButtonDClick():
if evt.LeftDClick():
button = 1
elif evt.MiddleDClick():
button = 3
elif evt.RightDClick():
button = 2
            else:
                evt.Skip()
                return  # unknown button: avoid falling through with `button` unset
self._vispy_mouse_press(pos=pos, button=button, modifiers=mods)
self._vispy_mouse_double_click(pos=pos, button=button,
modifiers=mods)
evt.Skip()
def on_key_down(self, evt):
if self._vispy_canvas is None:
return
key, text = _process_key(evt)
self._vispy_canvas.events.key_press(key=key, text=text,
modifiers=_get_mods(evt))
def on_key_up(self, evt):
if self._vispy_canvas is None:
return
key, text = _process_key(evt)
self._vispy_canvas.events.key_release(key=key, text=text,
modifiers=_get_mods(evt))
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
def __init__(self, vispy_timer):
BaseTimerBackend.__init__(self, vispy_timer)
assert _wx_app is not None
parent = _wx_app.GetTopWindow() # assume it's the parent window
self._timer = wx.Timer(parent, -1)
parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer)
def _vispy_start(self, interval):
self._timer.Start(interval * 1000., False)
def _vispy_stop(self):
self._timer.Stop()
def _vispy_timeout(self, evt):
self._vispy_timer._timeout()
evt.Skip()
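# Editor's sketch of selecting this backend through the public vispy API
# (assumes wxPython is installed; not part of the original module):
#   from vispy import app
#   app.use_app('wx')
#   canvas = app.Canvas(keys='interactive', show=True)
#   app.run()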
# === File: run_joint/console_roberta_bias.py (repo: CogComp/EventProcessTyping, license: MIT) ===
import torch
print(torch.cuda.is_available())
from transformers import RobertaTokenizer, RobertaModel, GPT2Model, RobertaForMultipleChoice
import tqdm, sklearn
import numpy as np
import os, time, sys
import pickle
import multiprocessing
from multiprocessing import Process, Value, Manager
from itertools import chain
import scipy, random
import json
if '../utils' not in sys.path:
sys.path.append('../utils')
from data import Data
from jointSSmrl_roberta_bias import torchpart
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
def get_hypernym_path(input_word, max_length=20, return_single_set = True):
paths = list()
syn_sets = wn.synsets(input_word)
for syn in syn_sets:
raw_path = syn.hypernym_paths()
for p in raw_path:
tmp_path = [input_word]
last_node = input_word
for tmp_synset in p[::-1]:
tmp_postag = tmp_synset._name.split('.')[1]
if tmp_postag == 'v':
new_node = tmp_synset._name
else:
new_node = tmp_synset._name
dot_p = new_node.find('.')
if dot_p > 0:
new_node = new_node[:dot_p]
tmp_path.append(new_node)
last_node = new_node
paths.append(tmp_path[:max_length])
if len(paths) == 0:
paths = [[input_word]]
if return_single_set:
sets = set([])
for x in paths:
for y in x:
sets.add(y)
return sets
return paths
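# Illustrative call (editor's sketch; exact chains depend on the installed
# WordNet data):
#   paths = get_hypernym_path('run', return_single_set=False)
#   # Each entry runs from the word toward a WordNet root, e.g.
#   # ['run', 'run.v.01', 'travel.v.01', ...]; verb synsets keep their full
#   # synset names, other parts of speech are trimmed to the lemma.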
def main():
data_bin, model_bin = '../run/seqVerbMC/data_subsrl_1sv_1sa_argtrim.bin', './full_model/Roberta_BI/full_model_sptoken_ep121_a1.0_m1-0.1_m2-0.1.bin'
data = Data()
if os.path.exists(data_bin):
data.load(data_bin)
print ("==ATTN== ",len(data.processes)," sequences.")
    else:
        # NOTE: `data_file` is not defined anywhere in this script; this branch
        # only works if the raw tsv path is supplied before reaching it.
        data.load_tsv_plain(data_file)
        data.save(data_bin)
data.dump_dataset_format('./dataset_seq.tsv', 'seq')
# W/O n-1 gram
M = torchpart()
M.load(model_bin)
    # One throwaway pass with a blank event, apparently to warm up both heads.
    M.serve_verb([' '], data, limit_ids=None, topk=1), M.serve_arg([' '], data, limit_ids=None, topk=1)
sequence = input("Events split by @ (exit() to quit):")
#verbs, sequences, true_ids, v2s, limit_ids
while True:
sequence = sequence.strip().lower()
if sequence[:4] == 'exit':
exit()
sequence = sequence.split('@')
vtype, atype = M.serve_verb(sequence, data, limit_ids=None, topk=6), M.serve_arg(sequence, data, limit_ids=None, topk=6)
print (vtype, atype)
sequence = input("Events split by @ (exit() to quit):")
if __name__ == "__main__":
main()
# === File: chainer_openpose/links/__init__.py (repo: iory/chainer-openpose, license: MIT) ===
from chainer_openpose.links.model.openpose import OpenPoseNet # NOQA
from chainer_openpose.links.model.openpose.openpose_train_chain import OpenPoseTrainChain # NOQA
# === File: F_Machine_learning/2_Supervised-Learning/solutions/ex2_5.py (repo: oercompbiomed/CBM101, license: MIT) ===
# Notebook solution snippet: assumes `neighbors` (sklearn), `plt` (matplotlib),
# `plot_decision_boundary`, and the train/test splits from the exercise context.
for k in [1, 7, 15]:
    clf = neighbors.KNeighborsClassifier(n_neighbors=k)
clf.fit(X_train, y_train)
plot_decision_boundary(clf, X_test, y_test)
plt.show()
# The decision boundary is more fine-grained for small Ks.
aced3c58a973fc50909c91d8a283c38e0078ae5d | 22,034 | py | Python | ax/utils/testing/core_stubs.py | xiecong/Ax | f6501807bbc6bb952d636391231ebeb10646769a | [
"MIT"
] | null | null | null | ax/utils/testing/core_stubs.py | xiecong/Ax | f6501807bbc6bb952d636391231ebeb10646769a | [
"MIT"
] | null | null | null | ax/utils/testing/core_stubs.py | xiecong/Ax | f6501807bbc6bb952d636391231ebeb10646769a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from datetime import datetime
from typing import Dict, List, MutableMapping
import numpy as np
import pandas as pd
import torch
from ax.core.arm import Arm
from ax.core.batch_trial import AbandonedArm, BatchTrial
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import Objective, ScalarizedObjective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.parameter import (
ChoiceParameter,
FixedParameter,
ParameterType,
RangeParameter,
)
from ax.core.parameter_constraint import (
OrderConstraint,
ParameterConstraint,
SumConstraint,
)
from ax.core.search_space import SearchSpace
from ax.core.simple_experiment import SimpleExperiment
from ax.core.trial import Trial
from ax.core.types import (
ComparisonOp,
TModelCov,
TModelMean,
TModelPredict,
TModelPredictArm,
TParameterization,
)
from ax.metrics.branin import BraninMetric
from ax.metrics.factorial import FactorialMetric
from ax.metrics.hartmann6 import Hartmann6Metric
from ax.modelbridge.factory import Cont_X_trans, get_factorial, get_sobol
from ax.runners.synthetic import SyntheticRunner
from ax.utils.common.logger import get_logger
logger = get_logger("ae_experiment")
# Experiments
def get_experiment() -> Experiment:
return Experiment(
name="test",
search_space=get_search_space(),
optimization_config=get_optimization_config(),
status_quo=get_status_quo(),
description="test description",
tracking_metrics=[Metric(name="tracking")],
is_test=True,
)
def get_branin_experiment(
has_optimization_config: bool = True,
with_batch: bool = False,
with_status_quo: bool = False,
) -> Experiment:
exp = Experiment(
name="branin_test_experiment",
search_space=get_branin_search_space(),
optimization_config=get_branin_optimization_config()
if has_optimization_config
else None,
runner=SyntheticRunner(),
is_test=True,
)
if with_status_quo:
exp.status_quo = Arm(parameters={"x1": 0.0, "x2": 0.0})
if with_batch:
sobol_generator = get_sobol(search_space=exp.search_space)
sobol_run = sobol_generator.gen(n=15)
exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
sobol_run
)
return exp
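# Illustrative usage (editor's sketch):
#   exp = get_branin_experiment(with_batch=True, with_status_quo=True)
#   exp.trials[0].run()  # uses the attached SyntheticRunner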
def get_multi_type_experiment(
add_trial_type: bool = True, add_trials: bool = False
) -> MultiTypeExperiment:
oc = OptimizationConfig(Objective(BraninMetric("m1", ["x1", "x2"])))
experiment = MultiTypeExperiment(
name="test_exp",
search_space=get_branin_search_space(),
default_trial_type="type1",
default_runner=SyntheticRunner(dummy_metadata="dummy1"),
optimization_config=oc,
)
experiment.add_trial_type(
trial_type="type2", runner=SyntheticRunner(dummy_metadata="dummy2")
)
# Switch the order of variables so metric gives different results
experiment.add_tracking_metric(
BraninMetric("m2", ["x2", "x1"]), trial_type="type2", canonical_name="m1"
)
if add_trials and add_trial_type:
generator = get_sobol(experiment.search_space)
gr = generator.gen(10)
t1 = experiment.new_batch_trial(generator_run=gr, trial_type="type1")
t2 = experiment.new_batch_trial(generator_run=gr, trial_type="type2")
t1.set_status_quo_with_weight(status_quo=t1.arms[0], weight=0.5)
t2.set_status_quo_with_weight(status_quo=t2.arms[0], weight=0.5)
t1.run()
t2.run()
return experiment
def get_factorial_experiment(
has_optimization_config: bool = True,
with_batch: bool = False,
with_status_quo: bool = False,
) -> Experiment:
exp = Experiment(
name="factorial_test_experiment",
search_space=get_factorial_search_space(),
optimization_config=OptimizationConfig(
objective=Objective(metric=get_factorial_metric())
)
if has_optimization_config
else None,
runner=SyntheticRunner(),
is_test=True,
# pyre-fixme[6]: Expected `typing.Option...`List[FactorialMetric]`.
tracking_metrics=[get_factorial_metric("secondary_metric")],
)
if with_status_quo:
exp.status_quo = Arm(
parameters={
"factor1": "level11",
"factor2": "level21",
"factor3": "level31",
}
)
if with_batch:
factorial_generator = get_factorial(search_space=exp.search_space)
factorial_run = factorial_generator.gen(n=-1)
exp.new_batch_trial(optimize_for_power=with_status_quo).add_generator_run(
factorial_run
)
return exp
def get_simple_experiment() -> SimpleExperiment:
experiment = SimpleExperiment(
name="test_branin",
search_space=get_branin_search_space(),
status_quo=Arm(parameters={"x1": 0.0, "x2": 0.0}),
objective_name="sum",
)
experiment.description = "foobar"
return experiment
def get_simple_experiment_with_batch_trial() -> SimpleExperiment:
experiment = get_simple_experiment()
generator = get_sobol(experiment.search_space)
generator_run = generator.gen(10)
experiment.new_batch_trial(generator_run=generator_run)
return experiment
def get_experiment_with_repeated_arms(num_repeated_arms: int) -> Experiment:
batch_trial = get_batch_trial_with_repeated_arms(num_repeated_arms)
return batch_trial.experiment
def get_experiment_with_batch_trial() -> Experiment:
batch_trial = get_batch_trial()
return batch_trial.experiment
def get_experiment_with_batch_and_single_trial() -> Experiment:
batch_trial = get_batch_trial()
batch_trial.experiment.new_trial(generator_run=GeneratorRun(arms=[get_arm()]))
return batch_trial.experiment
def get_experiment_with_data() -> Experiment:
batch_trial = get_batch_trial()
batch_trial.experiment.attach_data(data=get_data())
batch_trial.experiment.attach_data(data=get_data())
batch_trial.experiment.attach_data(data=get_data())
return batch_trial.experiment
# Search Spaces
def get_search_space() -> SearchSpace:
parameters = [
get_range_parameter(),
get_range_parameter2(),
get_choice_parameter(),
get_fixed_parameter(),
]
return SearchSpace(
# pyre: Expected `List[ax.core.parameter.Parameter]` for 1st
# pyre: parameter `parameters` to call `ax.core.search_space.
# pyre: SearchSpace.__init__` but got `List[typing.
# pyre-fixme[6]: Union[ChoiceParameter, FixedParameter, RangeParameter]]`.
parameters=parameters,
parameter_constraints=[
get_order_constraint(),
get_parameter_constraint(),
get_sum_constraint1(),
],
)
def get_branin_search_space() -> SearchSpace:
parameters = [
RangeParameter(
name="x1", parameter_type=ParameterType.FLOAT, lower=-5, upper=10
),
RangeParameter(
name="x2", parameter_type=ParameterType.FLOAT, lower=0, upper=15
),
]
# Expected `List[ax.core.parameter.Parameter]` for 2nd parameter
# `parameters` to call `ax.core.search_space.SearchSpace.__init__` but got
# `List[RangeParameter]`.
# pyre-fixme[6]:
return SearchSpace(parameters=parameters)
def get_factorial_search_space() -> SearchSpace:
return SearchSpace(
# Expected `List[ax.core.parameter.Parameter]` for 2nd parameter
# `parameters` to call `ax.core.search_space.SearchSpace.__init__` but
# got `List[ChoiceParameter]`.
parameters=[
ChoiceParameter(
name="factor1",
parameter_type=ParameterType.STRING,
# Expected `List[typing.Optional[typing.Union[bool, float, str]]]` for
# 4th parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got
# `List[str]`.
values=["level11", "level12", "level13"],
),
ChoiceParameter(
name="factor2",
parameter_type=ParameterType.STRING,
# Expected `List[typing.Optional[typing.Union[bool, float, str]]]` for
# 4th parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got
# `List[str]`.
values=["level21", "level22"],
),
ChoiceParameter(
name="factor3",
parameter_type=ParameterType.STRING,
# Expected `List[typing.Optional[typing.Union[bool, float, str]]]` for
# 4th parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got
# `List[str]`.
values=["level31", "level32", "level33", "level34"],
),
]
)
def get_search_space_for_value(val: float = 3.0) -> SearchSpace:
return SearchSpace([FixedParameter("x", ParameterType.FLOAT, val)])
def get_search_space_for_range_value(min: float = 3.0, max: float = 6.0) -> SearchSpace:
return SearchSpace([RangeParameter("x", ParameterType.FLOAT, min, max)])
def get_search_space_for_range_values(
min: float = 3.0, max: float = 6.0
) -> SearchSpace:
return SearchSpace(
[
RangeParameter("x", ParameterType.FLOAT, min, max),
RangeParameter("y", ParameterType.FLOAT, min, max),
]
)
def get_discrete_search_space() -> SearchSpace:
return SearchSpace(
[
RangeParameter("x", ParameterType.INT, 0, 3),
RangeParameter("y", ParameterType.INT, 5, 7),
ChoiceParameter("z", ParameterType.STRING, ["red", "panda"]),
]
)
# Trials
def get_batch_trial(abandon_arm: bool = True) -> BatchTrial:
experiment = get_experiment()
batch = experiment.new_batch_trial()
arms = get_arms_from_dict(get_arm_weights1())
weights = get_weights_from_dict(get_arm_weights1())
batch.add_arms_and_weights(arms=arms, weights=weights, multiplier=0.75)
if abandon_arm:
batch.mark_arm_abandoned(batch.arms[0].name, "abandoned reason")
batch.runner = SyntheticRunner()
batch.set_status_quo_with_weight(status_quo=arms[0], weight=0.5)
return batch
def get_batch_trial_with_repeated_arms(num_repeated_arms: int) -> BatchTrial:
""" Create a batch that contains both new arms and N arms from the last
existed trial in the experiment. Where N is equal to the input argument
'num_repeated_arms'.
"""
experiment = get_experiment_with_batch_trial()
if len(experiment.trials) > 0:
# Get last (previous) trial.
prev_trial = experiment.trials[len(experiment.trials) - 1]
# Take the first N arms, where N is num_repeated_arms.
if len(prev_trial.arms) < num_repeated_arms:
logger.warning(
"There are less arms in the previous trial than the value of "
"input parameter 'num_repeated_arms'. Thus all the arms from "
"the last trial will be repeated in the new trial."
)
prev_arms = prev_trial.arms[:num_repeated_arms]
if isinstance(prev_trial, BatchTrial):
prev_weights = prev_trial.weights[:num_repeated_arms]
else:
prev_weights = [1] * len(prev_arms)
else:
raise Exception(
"There are no previous trials in this experiment. Thus the new "
"batch was not created as no repeated arms could be added."
)
# Create new (next) arms.
next_arms = get_arms_from_dict(get_arm_weights2())
next_weights = get_weights_from_dict(get_arm_weights2())
# Add num_repeated_arms to the new trial.
arms = prev_arms + next_arms
weights = prev_weights + next_weights
batch = experiment.new_batch_trial()
batch.add_arms_and_weights(arms=arms, weights=weights, multiplier=1)
batch.runner = SyntheticRunner()
batch.set_status_quo_with_weight(status_quo=arms[0], weight=0.5)
return batch
def get_trial() -> Trial:
experiment = get_experiment()
trial = experiment.new_trial()
arm = get_arms_from_dict(get_arm_weights1())[0]
trial.add_arm(arm)
trial.runner = SyntheticRunner()
return trial
# Parameters
def get_range_parameter() -> RangeParameter:
return RangeParameter(
name="w",
parameter_type=ParameterType.FLOAT,
lower=0.5,
upper=5.5,
log_scale=False,
digits=5,
)
def get_range_parameter2() -> RangeParameter:
return RangeParameter(name="x", parameter_type=ParameterType.INT, lower=1, upper=10)
def get_choice_parameter() -> ChoiceParameter:
return ChoiceParameter(
name="y",
parameter_type=ParameterType.STRING,
# Expected `List[typing.Optional[typing.Union[bool, float, str]]]` for 4th
# parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got `List[str]`.
values=["foo", "bar", "baz"],
)
def get_fixed_parameter() -> FixedParameter:
return FixedParameter(name="z", parameter_type=ParameterType.BOOL, value=True)
# Parameter Constraints
def get_order_constraint() -> OrderConstraint:
w = get_range_parameter()
x = get_range_parameter2()
return OrderConstraint(lower_parameter=x, upper_parameter=w)
def get_parameter_constraint() -> ParameterConstraint:
return ParameterConstraint(constraint_dict={"x": 1.0, "w": -1.0}, bound=1.0)
def get_sum_constraint1() -> SumConstraint:
w = get_range_parameter()
x = get_range_parameter2()
return SumConstraint(parameters=[x, w], is_upper_bound=False, bound=10.0)
def get_sum_constraint2() -> SumConstraint:
w = get_range_parameter()
x = get_range_parameter2()
return SumConstraint(parameters=[x, w], is_upper_bound=True, bound=10.0)
# Metrics
def get_metric() -> Metric:
return Metric(name="m1")
def get_branin_metric(name="branin") -> BraninMetric:
return BraninMetric(name=name, param_names=["x1", "x2"], noise_sd=0.01)
def get_hartmann_metric() -> Hartmann6Metric:
return Hartmann6Metric(name="hartmann", param_names=["x1", "x2"], noise_sd=0.01)
def get_factorial_metric(name: str = "success_metric") -> FactorialMetric:
coefficients = {
"factor1": {"level11": 0.1, "level12": 0.2, "level13": 0.3},
"factor2": {"level21": 0.1, "level22": 0.2},
"factor3": {"level31": 0.1, "level32": 0.2, "level33": 0.3, "level34": 0.4},
}
return FactorialMetric(
name=name,
# Expected `Dict[str, Dict[typing.Optional[typing.Union[bool, float, str]],
# float]]` for 3rd parameter `coefficients` to call
# `ax.metrics.factorial.FactorialMetric.__init__` but got `Dict[str,
# Dict[str, float]]`.
# pyre-fixme[6]:
coefficients=coefficients,
batch_size=int(1e4),
)
# Optimization Configs
def get_objective() -> Objective:
return Objective(metric=Metric(name="m1"), minimize=False)
def get_scalarized_objective() -> Objective:
return ScalarizedObjective(
metrics=[Metric(name="m1"), Metric(name="m2")],
weights=[1.0, 2.0],
minimize=False,
)
def get_outcome_constraint() -> OutcomeConstraint:
return OutcomeConstraint(metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-0.25)
def get_optimization_config() -> OptimizationConfig:
objective = get_objective()
outcome_constraints = [get_outcome_constraint()]
return OptimizationConfig(
objective=objective, outcome_constraints=outcome_constraints
)
def get_branin_objective() -> Objective:
return Objective(metric=get_branin_metric(), minimize=False)
def get_branin_outcome_constraint() -> OutcomeConstraint:
return OutcomeConstraint(metric=get_branin_metric(), op=ComparisonOp.LEQ, bound=0)
def get_optimization_config_no_constraints() -> OptimizationConfig:
return OptimizationConfig(objective=Objective(metric=Metric("test_metric")))
def get_branin_optimization_config() -> OptimizationConfig:
return OptimizationConfig(objective=get_branin_objective())
# Arms
def get_arm() -> Arm:
# Expected `Dict[str, typing.Optional[typing.Union[bool, float, str]]]` for 2nd
# parameter `parameters` to call `ax.core.arm.Arm.__init__` but got
# `Dict[str, typing.Union[float, str]]`.
return Arm(parameters={"w": 0.75, "x": 1, "y": "foo", "z": True})
def get_status_quo() -> Arm:
return Arm(
# Expected `Dict[str, typing.Optional[typing.Union[bool, float, str]]]` for 2nd
# parameter `parameters` to call `ax.core.arm.Arm.__init__`
# but got `Dict[str, typing.Union[float, str]]`.
parameters={"w": 0.2, "x": 1, "y": "bar", "z": False},
name="status_quo",
)
def get_arm_weights1() -> MutableMapping[Arm, float]:
parameters_dicts: List[TParameterization] = [
{"w": 0.85, "x": 1, "y": "baz", "z": False},
{"w": 0.75, "x": 1, "y": "foo", "z": True},
{"w": 1.4, "x": 2, "y": "bar", "z": True},
]
arms = [Arm(param_dict) for param_dict in parameters_dicts]
weights = [0.25, 0.5, 0.25]
return OrderedDict(zip(arms, weights))
def get_arm_weights2() -> MutableMapping[Arm, float]: # update
parameters_dicts: List[TParameterization] = [
{"w": 0.96, "x": 3, "y": "hello", "z": True},
{"w": 0.16, "x": 4, "y": "dear", "z": True},
{"w": 3.1, "x": 5, "y": "world", "z": False},
]
arms = [Arm(param_dict) for param_dict in parameters_dicts]
weights = [0.25, 0.5, 0.25]
return OrderedDict(zip(arms, weights))
def get_arms_from_dict(arm_weights_dict: MutableMapping[Arm, float]) -> List[Arm]:
return list(arm_weights_dict.keys())
def get_weights_from_dict(arm_weights_dict: MutableMapping[Arm, float]) -> List[float]:
return list(arm_weights_dict.values())
def get_arms() -> List[Arm]:
return list(get_arm_weights1().keys())
def get_weights() -> List[float]:
return list(get_arm_weights1().values())
def get_branin_arms(n: int, seed: int) -> List[Arm]:
# TODO replace with sobol
np.random.seed(seed)
x1_raw = np.random.rand(n)
x2_raw = np.random.rand(n)
return [
Arm(parameters={"x1": -5 + x1_raw[i] * 15, "x2": x2_raw[i] * 15})
for i in range(n)
]
def get_abandoned_arm() -> AbandonedArm:
return AbandonedArm(name="0_0", reason="foobar", time=datetime.now())
# Generator Runs
def get_generator_run() -> GeneratorRun:
arms = get_arms_from_dict(get_arm_weights1())
weights = get_weights_from_dict(get_arm_weights1())
optimization_config = get_optimization_config()
search_space = get_search_space()
arm_predictions = get_model_predictions_per_arm()
return GeneratorRun(
arms=arms,
weights=weights,
optimization_config=optimization_config,
search_space=search_space,
model_predictions=get_model_predictions(),
best_arm_predictions=(arms[0], arm_predictions[arms[0].signature]),
fit_time=10.0,
gen_time=5.0,
model_key="Sobol",
model_kwargs={"scramble": False, "torch_device": torch.device("cpu")},
bridge_kwargs={"transforms": Cont_X_trans, "torch_dtype": torch.double},
)
def get_generator_run2() -> GeneratorRun:
arms = get_arms_from_dict(get_arm_weights1())
weights = get_weights_from_dict(get_arm_weights1())
return GeneratorRun(arms=arms, weights=weights)
# Runners
def get_synthetic_runner() -> SyntheticRunner:
return SyntheticRunner(dummy_metadata="foobar")
# Data
def get_data() -> Data:
df_dict = {
"trial_index": 0,
"metric_name": "ax_test_metric",
"arm_name": ["status_quo", "0_0", "0_1", "0_2", "0_3"],
"mean": [1, 3, 2, 2.25, 1.75],
"sem": [0, 0.5, 0.25, 0.40, 0.15],
"n": [100, 100, 100, 100, 100],
}
return Data(df=pd.DataFrame.from_records(df_dict))
def get_branin_data() -> Data:
df_dict = {
"trial_index": 0,
"metric_name": "branin",
"arm_name": ["0_0"],
"mean": [5.0],
"sem": [0.0],
}
return Data(df=pd.DataFrame.from_records(df_dict))
# Instances of types from core/types.py
def get_model_mean() -> TModelMean:
mean: TModelMean = {"test_metric_1": [1, 2, 3], "test_metric_2": [3, 4, 5]}
return mean
def get_model_covariance() -> TModelCov:
covariance: TModelCov = {
"test_metric_1": {"test_metric_1": [5, 6, 7], "test_metric_2": [7, 8, 9]},
"test_metric_2": {"test_metric_1": [9, 10, 11], "test_metric_2": [11, 12, 13]},
}
return covariance
def get_model_predictions() -> TModelPredict:
model_predictions: TModelPredict = (get_model_mean(), get_model_covariance())
return model_predictions
def get_model_predictions_per_arm() -> Dict[str, TModelPredictArm]:
arms = list(get_arm_weights1().keys())
means = get_model_mean()
covariances = get_model_covariance()
metric_names = list(means.keys())
m_1, m_2 = metric_names[0], metric_names[1]
return {
arms[i].signature: (
{m_1: means[m_1][i], m_2: means[m_2][i]},
{
m_1: {m_1: covariances[m_1][m_1][i], m_2: covariances[m_1][m_2][i]},
m_2: {m_1: covariances[m_2][m_1][i], m_2: covariances[m_2][m_2][i]},
},
)
for i in range(len(arms))
}
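# A sketch of the mapping returned by get_model_predictions_per_arm, keyed by
# arm signature (values taken from the fake means/covariances above, i = 0):
#   signature -> ({'test_metric_1': 1, 'test_metric_2': 3},
#                 {'test_metric_1': {'test_metric_1': 5, 'test_metric_2': 7},
#                  'test_metric_2': {'test_metric_1': 9, 'test_metric_2': 11}})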
| 31.703597 | 88 | 0.663656 |
aced3c87cfc23d85228b5bdb1560bfc071d56a9d | 1,536 | py | Python | scripts/test_cliec2.py | lewisKit/rllabsharp | 3fb7c176ad47c54a626e89e65e731a142841033c | [
"MIT"
] | 1 | 2020-06-27T01:39:59.000Z | 2020-06-27T01:39:59.000Z | scripts/test_cliec2.py | lewisKit/rllabsharp | 3fb7c176ad47c54a626e89e65e731a142841033c | [
"MIT"
] | null | null | null | scripts/test_cliec2.py | lewisKit/rllabsharp | 3fb7c176ad47c54a626e89e65e731a142841033c | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import json
import multiprocessing
import os
import sys
import boto3
import click
aws_access_key_id = os.environ.get("AWS_ACCESS_KEY", None)
aws_secret_access_key = os.environ.get("AWS_ACCESS_SECRET", None)
REGIONS = [
"us-east-1",
]
def highlight(x):
if not isinstance(x, str):
        x = json.dumps(x, sort_keys=True, indent=2)
click.secho(x, fg='green')
def _collect_instances(region):
client = boto3.client(
"ec2",
region_name=region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
print("Collecting instances in region", region)
instances = [x['Instances'][0] for x in client.describe_instances(
Filters=[
{
'Name': 'instance-state-name',
'Values': [
'running'
]
},
]
)['Reservations']]
for instance in instances:
print(instance)
# print(instance['InstanceId'], instance['PublicIpAddress'])
if __name__ == '__main__':
# _collect_instances(REGIONS[0])
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[
{
"Name": "instance-state-name",
"Values": ['running'],
},
{
"Name": "instance-type",
"Values": ['c4.2xlarge']
}]
)
for instance in instances:
print(instance.id, instance.instance_type)
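# Usage sketch: with AWS credentials available through the default boto3
# credential chain, running this script prints the id and instance type of
# every running c4.2xlarge instance, e.g.:
#   python test_cliec2.py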
| 21.333333 | 70 | 0.547526 |
aced3d84ae023a682b2f797ad1209007a3c45d65 | 409 | py | Python | src/surface_to_type/__init__.py | ScottSnapperLab/surface_to_type | 9f9875d058b5354dec31e1ffe725e722d98f0d23 | [
"MIT"
] | null | null | null | src/surface_to_type/__init__.py | ScottSnapperLab/surface_to_type | 9f9875d058b5354dec31e1ffe725e722d98f0d23 | [
"MIT"
] | null | null | null | src/surface_to_type/__init__.py | ScottSnapperLab/surface_to_type | 9f9875d058b5354dec31e1ffe725e722d98f0d23 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Provide code that should be accessable from the TOP level of the package."""
# Imports
import logging
log = logging.getLogger(__name__)
from pathlib import Path
import munch
from surface_to_type.misc import load_csv
from surface_to_type.logging import setup_logging
# Metadata
__author__ = "Gus Dunn"
__email__ = "w.gus.dunn@gmail.com"
__all__ = ["setup_logging", "load_csv"]
| 18.590909 | 79 | 0.767726 |
aced3dc5015aac61c0e448dfdddf8fb3c3ec4336 | 393 | py | Python | src/exojax/spec/limb_darkening.py | dcmvdbekerom/exojax | 9b9305f8e383c73bdb97c1cfb0e276ddafcd75de | [
"MIT"
] | null | null | null | src/exojax/spec/limb_darkening.py | dcmvdbekerom/exojax | 9b9305f8e383c73bdb97c1cfb0e276ddafcd75de | [
"MIT"
] | null | null | null | src/exojax/spec/limb_darkening.py | dcmvdbekerom/exojax | 9b9305f8e383c73bdb97c1cfb0e276ddafcd75de | [
"MIT"
] | null | null | null | """Limb darkening functions
"""
import jax.numpy as jnp
def ld_kipping(q1,q2):
"""Uninformative prior conversion of the limb darkening by Kipping (arxiv:1308.0009)
Args:
q1: U(0,1)
q2: U(0,1)
Returns:
u1: quadratic LD coefficient u1
u2: quadratic LD coefficient u2
"""
sqrtq1=jnp.sqrt(q1)
return 2.0*sqrtq1*q2,sqrtq1*(1.0-2.0*q2)
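# Minimal usage sketch (values are illustrative): sample q1, q2 ~ U(0, 1),
# e.g. from an MCMC prior, then convert to quadratic LD coefficients:
#   u1, u2 = ld_kipping(0.25, 0.5)   # -> (0.5, 0.0)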
| 18.714286 | 88 | 0.615776 |
aced3ecedfa6ade1b55434e62dea556f2f8dd0ab | 11,979 | py | Python | agents/ppo_parallel.py | balaz94/FRI-RL-SC2 | eaf2103265a999a16e786da134810681bf0266a0 | [
"MIT"
] | null | null | null | agents/ppo_parallel.py | balaz94/FRI-RL-SC2 | eaf2103265a999a16e786da134810681bf0266a0 | [
"MIT"
] | null | null | null | agents/ppo_parallel.py | balaz94/FRI-RL-SC2 | eaf2103265a999a16e786da134810681bf0266a0 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
from torch.multiprocessing import Process, Pipe
from utils.stats import MovingAverageScore, write_to_file
def worker(connection, env_params, env_func, count_of_iterations, count_of_envs,
count_of_steps, gamma, gae_lambda):
envs = [env_func(*env_params) for _ in range(count_of_envs)]
observations = torch.stack([torch.from_numpy(env.reset()) for env in envs])
game_score = np.zeros(count_of_envs)
mem_log_probs = torch.zeros((count_of_steps, count_of_envs, 1))
mem_actions = torch.zeros((count_of_steps, count_of_envs, 1), dtype=torch.long)
mem_values = torch.zeros((count_of_steps + 1, count_of_envs, 1))
mem_rewards = torch.zeros((count_of_steps, count_of_envs, 1))
for iteration in range(count_of_iterations):
mem_non_terminals = torch.ones((count_of_steps, count_of_envs, 1))
scores = []
for step in range(count_of_steps):
connection.send(observations.float())
logits, values = connection.recv()
probs = F.softmax(logits, dim=-1)
actions = probs.multinomial(num_samples=1)
log_probs = F.log_softmax(logits, dim=-1).gather(1, actions)
mem_log_probs[step] = log_probs
mem_actions[step] = actions
mem_values[step] = values
for idx in range(count_of_envs):
observation, reward, terminal, _ = envs[idx].step(actions[idx, 0].item())
mem_rewards[step, idx, 0] = reward
game_score[idx] += reward
if reward < 0:
mem_non_terminals[step, idx, 0] = 0
if terminal:
mem_non_terminals[step, idx, 0] = 0
scores.append(game_score[idx])
game_score[idx] = 0
observation = envs[idx].reset()
observations[idx] = torch.from_numpy(observation)
connection.send(observations.float())
mem_values[step + 1] = connection.recv()
mem_rewards = torch.clamp(mem_rewards, -1.0, 1.0)
advantages = torch.zeros((count_of_steps, count_of_envs, 1))
values = torch.zeros((count_of_steps, count_of_envs, 1))
t_gae = torch.zeros((count_of_envs, 1))
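        # Generalized Advantage Estimation (Schulman et al., 2016):
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   A_t = delta_t + gamma * lambda * A_{t+1}
        # with the recursion reset at terminals via mem_non_terminals.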
for step in reversed(range(count_of_steps)):
delta = mem_rewards[step] + gamma * mem_values[step + 1] * mem_non_terminals[step] \
- mem_values[step]
t_gae = delta + gamma * gae_lambda * t_gae * mem_non_terminals[step]
values[step] = t_gae + mem_values[step]
advantages[step] = t_gae.clone()
connection.send([mem_log_probs, mem_actions, values, advantages, scores])
connection.recv()
connection.close()
class Agent:
def __init__(self, model, optimizer, gamma=0.997, epsilon=0.1,
coef_value=0.5, coef_entropy=0.001, gae_lambda=0.95,
name='ppo', path='results/ppo/pong/', device='cpu', lr = 0.00025):
self.model = model
self.model.to(device)
if optimizer == 'Adam':
print('optimizer: Adam')
self.optimizer = torch.optim.Adam(self.model.parameters(), lr = lr)
elif optimizer == 'SGD':
print('optimizer: SGD wiht momentum = 0.9')
self.optimizer = torch.optim.SGD(self.model.parameters(), lr = lr, momentum = 0.9)
elif optimizer == 'RMS':
print('optimizer: RMSProp')
self.optimizer = torch.optim.RMSprop(self.model.parameters(), lr = lr)
else:
print('optimizer: SGD wiht momentum = 0.0')
self.optimizer = torch.optim.SGD(self.model.parameters(), lr = lr)
self.gamma = gamma
self.coef_value = coef_value
self.coef_entropy = coef_entropy
self.gae_lambda = gae_lambda
self.lower_bound = 1 - epsilon
self.upper_bound = 1 + epsilon
self.name = name
self.path = path
self.device = device
def train(self, env_params, env_func, count_of_actions,
count_of_iterations=10000, count_of_processes=2,
count_of_envs=16, count_of_steps=128, count_of_epochs=4,
batch_size=512, input_dim=(4, 96, 96)):
print('Training is starting')
logs_score = 'iteration,episode,avg_score,best_avg_score,best_score'
logs_loss = 'iteration,episode,policy,value,entropy'
        scores, count_of_episodes = [], 0
        best_score, best_avg_score = -np.inf, -np.inf
        buffer_size = count_of_processes * count_of_envs * count_of_steps
        batches_per_iteration = count_of_epochs * buffer_size / batch_size
processes, connections = [], []
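        # Message flow per iteration: each worker sends observations at every
        # step and receives (logits, values) back; after the rollout it sends
        # (log_probs, actions, returns, advantages, finished-episode scores)
        # and waits for an acknowledgement before starting the next rollout.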
for _ in range(count_of_processes):
parr_connection, child_connection = Pipe()
process = Process(target=worker, args=(
child_connection, env_params, env_func, count_of_iterations,
count_of_envs, count_of_steps, self.gamma, self.gae_lambda))
connections.append(parr_connection)
processes.append(process)
process.start()
mem_dim = (count_of_processes, count_of_steps, count_of_envs)
mem_observations = torch.zeros((mem_dim + input_dim), device=self.device)
mem_actions = torch.zeros((*mem_dim, 1), device=self.device, dtype=torch.long)
mem_log_probs = torch.zeros((*mem_dim, 1), device=self.device)
        mem_target_values = torch.zeros((*mem_dim, 1), device=self.device)
mem_advantages = torch.zeros((*mem_dim, 1), device=self.device)
for iteration in range(count_of_iterations):
for step in range(count_of_steps):
observations = [conn.recv() for conn in connections]
observations = torch.stack(observations).to(self.device)
mem_observations[:, step] = observations
with torch.no_grad():
logits, values = self.model(observations.view(-1, *input_dim))
# If you selected actions in the main process, your iteration
# would last about 0.5 seconds longer (measured on 2 processes)
logits = logits.view(-1, count_of_envs, count_of_actions).cpu()
values = values.view(-1, count_of_envs, 1).cpu()
for idx in range(count_of_processes):
connections[idx].send([logits[idx], values[idx]])
observations = [conn.recv() for conn in connections]
observations = torch.stack(observations).to(self.device)
with torch.no_grad():
                _, values = self.model(observations.view(-1, *input_dim))
values = values.view(-1, count_of_envs, 1).cpu()
for conn_idx in range(count_of_processes):
connections[conn_idx].send(values[conn_idx])
            end_games = []
            for idx, connection in enumerate(connections):
                log_probs, actions, target_values, advantages, score_of_end_games = connection.recv()
                mem_log_probs[idx] = log_probs.to(self.device)
                mem_actions[idx] = actions.to(self.device)
                mem_target_values[idx] = target_values.to(self.device)
                mem_advantages[idx] = advantages.to(self.device)
                end_games.extend(score_of_end_games)
            count_of_end_games = len(end_games)
            prev_best_avg_score = best_avg_score
            if count_of_end_games > 0:
                count_of_episodes += count_of_end_games
                scores.extend(end_games)
                best_score = max(best_score, np.max(end_games))
                # moving average over at most the last 100 episodes
                scores = scores[-100:]
                avg_score = np.average(scores)
                best_avg_score = max(best_avg_score, avg_score)
                logs_score += '\n' + str(iteration) + ',' + str(count_of_episodes) \
                    + ',' + str(avg_score) + ',' + str(best_avg_score) \
                    + ',' + str(best_score)
                print('iteration', iteration, '\tepisode', count_of_episodes,
                      '\tavg score', avg_score, '\tbest score', best_score,
                      '\tbest avg score', best_avg_score)
            b_observations = mem_observations.view((-1, ) + input_dim)
            b_actions = mem_actions.view(-1, 1)
            b_log_probs = mem_log_probs.view(-1, 1)
            b_target_values = mem_target_values.view(-1, 1)
            b_advantages = mem_advantages.view(-1, 1)
            b_advantages = (b_advantages - torch.mean(b_advantages)) / (torch.std(b_advantages) + 1e-5)
            s_policy, s_value, s_entropy = 0.0, 0.0, 0.0
for epoch in range(count_of_epochs):
perm = torch.randperm(buffer_size, device=self.device).view(-1, batch_size)
for idx in perm:
                    logits, values = self.model(b_observations[idx])
probs = F.softmax(logits, dim=-1)
log_probs = F.log_softmax(logits, dim=-1)
                    new_log_probs = log_probs.gather(1, b_actions[idx])
entropy_loss = (log_probs * probs).sum(1, keepdim=True).mean()
                    value_loss = F.mse_loss(values, b_target_values[idx])
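                    # PPO clipped surrogate objective: maximize
                    #   min(r * A, clip(r, 1 - eps, 1 + eps) * A)
                    # with r = exp(log pi_new - log pi_old); the sign is
                    # flipped below because the optimizer minimizes.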
                    ratio = torch.exp(new_log_probs - b_log_probs[idx])
                    surr_policy = ratio * b_advantages[idx]
                    surr_clip = torch.clamp(ratio, self.lower_bound, self.upper_bound) \
                                * b_advantages[idx]
                    policy_loss = - torch.min(surr_policy, surr_clip).mean()
s_policy += policy_loss.item()
s_value += value_loss.item()
s_entropy += entropy_loss.item()
self.optimizer.zero_grad()
loss = policy_loss + self.coef_value * value_loss \
+ self.coef_entropy * entropy_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
self.optimizer.step()
            logs_loss += '\n' + str(iteration) + ',' \
                + str(count_of_episodes) + ',' \
                + str(s_policy / batches_per_iteration) + ',' \
                + str(s_value / batches_per_iteration) + ',' \
                + str(s_entropy / batches_per_iteration)
            if best_avg_score > prev_best_avg_score:
                self.save_model()
            if iteration % 10 == 0:
                write_to_file(logs_score, self.path + 'data/' + self.name + '.txt')
                write_to_file(logs_loss, self.path + 'data/' + self.name + '_loss.txt')
            # acknowledge the workers so they start the next rollout
            for connection in connections:
                connection.send(1)
        print('Training has ended, best avg score is', best_avg_score)
        for process in processes:
            process.join()
    def save_model(self):
        torch.save(self.model.state_dict(),
                   self.path + 'models/' + self.name + '.pt')
def load_model(self, path):
self.model.load_state_dict(torch.load(path))
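# Minimal usage sketch (all names below are illustrative, not part of this
# module): any torch model whose forward(obs) returns (logits, values) and any
# gym-style environment factory should work, e.g.:
#   model = ActorCritic(input_dim=(4, 96, 96), count_of_actions=6)
#   agent = Agent(model, optimizer='Adam', lr=0.00025)
#   agent.train(env_params=('PongNoFrameskip-v4',), env_func=make_env,
#               count_of_actions=6, count_of_processes=2)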
| 46.976471 | 215 | 0.593706 |
aced3f4e41afe80bda4739867d1d9023e3cf5672 | 9,194 | py | Python | examples/tensorflow/train/wide-deep/code/wide_deep.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 38 | 2017-04-26T04:00:09.000Z | 2022-02-10T02:51:05.000Z | examples/tensorflow/train/wide-deep/code/wide_deep.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 17 | 2017-11-20T20:47:09.000Z | 2022-02-09T23:48:46.000Z | examples/tensorflow/train/wide-deep/code/wide_deep.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 28 | 2017-07-08T05:23:13.000Z | 2020-08-18T03:12:27.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API.
Modified by UAI Author
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import os
import tensorflow as tf
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', type=str, default='/tmp/census_model',
help='Base directory for the model.')
parser.add_argument(
'--model_type', type=str, default='wide_deep',
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--train_epochs', type=int, default=40, help='Number of training epochs.')
parser.add_argument(
'--epochs_per_eval', type=int, default=2,
help='The number of training epochs to run between evaluations.')
parser.add_argument(
'--batch_size', type=int, default=40, help='Number of examples per batch.')
parser.add_argument(
'--train_data', type=str, default='/tmp/census_data/adult.data',
help='Path to the training data.')
parser.add_argument(
'--test_data', type=str, default='/tmp/census_data/adult.test',
help='Path to the test data.')
'''
Add UAI Related arguments
'''
parser.add_argument(
'--data_dir', type=str, default='/data/data/',
help='Path to the data.')
parser.add_argument(
'--output_dir', type=str, default='/data/output',
help='Path to the output data.')
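# Example invocation (paths are illustrative; on UAI Train the data and output
# directories default to /data/data and /data/output):
#   python wide_deep.py --model_type=wide_deep \
#       --train_data=adult.data --test_data=adult.test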
_NUM_EXAMPLES = {
'train': 32561,
#'train': 520976,
'validation': 16281,
}
def build_model_columns():
"""Builds a set of wide and deep feature columns."""
# Continuous columns
age = tf.feature_column.numeric_column('age')
education_num = tf.feature_column.numeric_column('education_num')
capital_gain = tf.feature_column.numeric_column('capital_gain')
capital_loss = tf.feature_column.numeric_column('capital_loss')
hours_per_week = tf.feature_column.numeric_column('hours_per_week')
education = tf.feature_column.categorical_column_with_vocabulary_list(
'education', [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
'marital_status', [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
'relationship', [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
'workclass', [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=1000)
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
education, marital_status, relationship, workclass, occupation,
age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
['education', 'occupation'], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
]
wide_columns = base_columns + crossed_columns
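  # The wide part above memorizes sparse feature crosses, while the deep
  # columns below generalize through dense and embedded representations of
  # the same raw features.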
deep_columns = [
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(marital_status),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(occupation, dimension=8),
]
return wide_columns, deep_columns
def build_estimator(model_dir, model_type):
"""Build an estimator appropriate for the given model type."""
wide_columns, deep_columns = build_model_columns()
hidden_units = [100, 75, 50, 25]
  # You can choose between GPU and CPU by setting device_count {'GPU'} to 1 or 0.
  # According to our profiling, the wide-deep model is CPU sensitive, which means
  # it consumes more CPU resources than GPU. We do not recommend using multi-GPU
  # for this model.
  # Further, when batch_size is small it is more efficient to run on CPU instead of GPU.
run_config = tf.estimator.RunConfig().replace(
session_config=tf.ConfigProto(device_count={'GPU': 1}))
if model_type == 'wide':
return tf.estimator.LinearClassifier(
model_dir=model_dir,
feature_columns=wide_columns,
config=run_config)
elif model_type == 'deep':
return tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=hidden_units,
config=run_config)
else:
return tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=hidden_units,
config=run_config)
def input_fn(data_file, num_epochs, shuffle, batch_size):
"""Generate an input function for the Estimator."""
assert tf.gfile.Exists(data_file), (
'%s not found. Please make sure you have either run data_download.py or '
'set both arguments --train_data and --test_data.' % data_file)
def parse_csv(value):
print('Parsing', data_file)
columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
features = dict(zip(_CSV_COLUMNS, columns))
labels = features.pop('income_bracket')
return features, tf.equal(labels, '>50K')
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(data_file)
if shuffle:
dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
# We add a dataset.prefetch here to improve the performance
# The num_parallel_calls and prefetch size should be tuned according to your data
dataset = dataset.map(parse_csv, num_parallel_calls=5)
dataset = dataset.prefetch(batch_size)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def main(unused_argv):
# Clean up the model directory if present
'''UAI Train Related
  Use output_dir in UAI instead of model_dir, or you can change model_dir to /data/output/
'''
shutil.rmtree(FLAGS.output_dir, ignore_errors=True)
model = build_estimator(FLAGS.output_dir, FLAGS.model_type)
#hook = tf.train.ProfilerHook(save_steps=10, output_dir='/data/output/')
# Train and evaluate the model every `FLAGS.epochs_per_eval` epochs.
for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
'''UAI Train Related
Note: UAI Train data input is FLAGS.data_dir (/data/data/),
        so FLAGS.train_data and FLAGS.test_data are joined with it (please use relative paths)
'''
model.train(input_fn=lambda: input_fn(
os.path.join(FLAGS.data_dir, FLAGS.train_data), FLAGS.epochs_per_eval, True, FLAGS.batch_size))#, hooks=[hook])
results = model.evaluate(input_fn=lambda: input_fn(
os.path.join(FLAGS.data_dir, FLAGS.test_data), 1, False, FLAGS.batch_size))
# Display evaluation metrics
print('Results at epoch', (n + 1) * FLAGS.epochs_per_eval)
print('-' * 60)
for key in sorted(results):
print('%s: %s' % (key, results[key]))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.054902 | 119 | 0.696541 |
aced3f728e9c6546b85fed9dc12fd5bd07d73739 | 2,155 | py | Python | populate_data.py | quang2705/Data_Aggregation | 439d62600d97adaa2e585f4fb4fbb726631b0048 | [
"MIT"
] | null | null | null | populate_data.py | quang2705/Data_Aggregation | 439d62600d97adaa2e585f4fb4fbb726631b0048 | [
"MIT"
] | 3 | 2020-06-06T01:46:18.000Z | 2021-06-10T22:48:13.000Z | populate_data.py | quang2705/Data_Aggregation | 439d62600d97adaa2e585f4fb4fbb726631b0048 | [
"MIT"
] | null | null | null | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','data_aggregate.settings')
import django
django.setup()
from data_agg_api.models import Temperature
import datetime
import random
import json
import requests
def create_temperatures():
print("Generating fake temperature data...")
no_data = 10
date = datetime.date.today()
for i in range (no_data):
temp = round(random.uniform(-100, 100),2)
hour = random.randint(0,23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
oneday = datetime.timedelta(days=1)
        temperature = Temperature(temp=temp, date_time=str(date) + " " + str(time))
        temperature.save()
        print("Created a temperature record of {0} Celsius at {1} on {2}".format(temp, time, date))
        date = date - oneday
def create_json_data(no_data=10):
print("Generating fake JSON data...")
data_json = {"data":[]}
date = datetime.date.today()
for i in range(no_data):
temp = round(random.uniform(-100, 100),2)
hour = random.randint(0,23)
minute = random.randint(0, 59)
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
oneday = datetime.timedelta(days=1)
temperature = {
"date_time": str(date) + " " + str(time),
"val": temp
}
date = date - oneday
data_json["data"].append(temperature)
with open('data1.json', 'w') as json_file:
json.dump(data_json, json_file)
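# Sample of the JSON written above (values are illustrative):
#   {"data": [{"date_time": "2021-07-05 13:42:07", "val": 23.51}, ...]}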
def main():
print("Type you command here: ", end='')
usage = input()
if (usage == 'simulate'):
print("Number of data: ", end = '')
no_data = int(input())
print("Number of loops: ", end='')
loop = int(input())
for i in range(loop):
print("generating and calling api")
create_json_data(no_data)
            with open('data1.json', 'rb') as f:
                res = requests.post("http://localhost:8000/api/temperatures/upload/",
                                    data={'data_file': f.read()})
if res.status_code != 200:
return 0
main()
| 31.231884 | 136 | 0.598608 |
aced3f73f32e6da0be682b217c7ab7d656d021fa | 3,764 | py | Python | main.py | echoround/RNN_stock_price_predictor | f5bbb0015e4c18776d7e2722146fe4c5e6fc58ef | [
"MIT"
] | null | null | null | main.py | echoround/RNN_stock_price_predictor | f5bbb0015e4c18776d7e2722146fe4c5e6fc58ef | [
"MIT"
] | null | null | null | main.py | echoround/RNN_stock_price_predictor | f5bbb0015e4c18776d7e2722146fe4c5e6fc58ef | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as web
import pandas_datareader.data as pdr
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import yfinance as yf
yf.pdr_override()
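# Route pandas_datareader's get_data_yahoo() calls through yfinance, since the
# old Yahoo Finance endpoint that pandas_datareader targeted was discontinued.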
# Load data
stock_ticker = 'NVDA'
start = '01-01-2012'
end = '02-07-2021'
start = dt.datetime.strptime(start, '%d-%m-%Y')
end = dt.datetime.strptime(end, '%d-%m-%Y')
#start = dt.datetime(2012, 1,1)
#end = dt.datetime(2020, 1, 1)
data = pdr.get_data_yahoo(stock_ticker, start=start, end=end)
#data = web.DataReader(stock_ticker, 'yahoo', start, end)
# pre-processing
min_max = MinMaxScaler(feature_range=(0,1))
scaled_data = min_max.fit_transform(data['Close'].values.reshape(-1,1))
# decreasing prediction range will make the model more sensitive to short-term fluctuations and
# increasing the range will make it better at long-term prediction
prediction_range = 60
x_train = []
y_train = []
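# Build supervised samples with a sliding window: the previous
# `prediction_range` scaled closes are the features, the next close the label.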
for i in range(prediction_range, len(scaled_data)):
x_train.append(scaled_data[i-prediction_range:i, 0])
y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# the model (LSTM layers follower by Dropout layers until a final Dense layer that gives us the stock prediction)
model = Sequential()
model.add(LSTM(units=45, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=45, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=45))
model.add(Dense(units=1)) # prediction of next closing value
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=20, batch_size=32)
# Testing data
# loading test data
#test_start = dt.datetime(2021,2,1)
#test_end = dt.datetime.now()
test_start = '01-02-2021'
test_end = '05-07-2021'
test_start = dt.datetime.strptime(test_start, '%d-%m-%Y')
test_end = dt.datetime.strptime(test_end, '%d-%m-%Y')
test_data = pdr.get_data_yahoo(stock_ticker, start=test_start, end=test_end)
#test_data = web.DataReader(stock_ticker, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values
total_data = pd.concat((data['Close'], test_data['Close']), axis=0)
model_inputs = total_data[len(total_data) - len(test_data) - prediction_range:].values
model_inputs = model_inputs.reshape(-1,1)
model_inputs = min_max.transform(model_inputs)
# make predictions on test data
x_test = []
for i in range(prediction_range, len(model_inputs)):
x_test.append(model_inputs[i-prediction_range:i,0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
'''
predicted_prices = model.predict(x_test)
# prices are in transformed scale so need to inverse transform them back
predicted_prices = min_max.inverse_transform(predicted_prices)
# plot test predictions
plt.plot(actual_prices, color="blue", label="Actual {} price".format(stock_ticker))
plt.plot(predicted_prices, color="red", label="Predicted {} price".format(stock_ticker))
plt.title("{} stock price".format(stock_ticker))
plt.xlabel('Time')
plt.ylabel("{} stock price".format(stock_ticker))
plt.legend()
plt.show()
'''
# predict next day
real_data = [model_inputs[len(model_inputs) - prediction_range:, 0]]
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
prediction = model.predict(real_data)
prediction = min_max.inverse_transform(prediction)
print("Prediction for {0}: {1}".format(stock_ticker, prediction))
| 29.873016 | 113 | 0.752922 |
aced3f9476b409ae916436dc69c110bf676c3d98 | 8,554 | py | Python | netconf/nc_rpc/rpc_response.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 72 | 2017-01-18T02:36:34.000Z | 2022-02-12T15:28:30.000Z | netconf/nc_rpc/rpc_response.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 11 | 2017-03-03T17:56:09.000Z | 2022-02-11T03:38:03.000Z | netconf/nc_rpc/rpc_response.py | sathishms77/test | bf8df6fc16c41720c7d99ed1ff17a64b543e9672 | [
"Apache-2.0"
] | 120 | 2017-02-02T23:26:11.000Z | 2022-03-13T05:30:23.000Z | #!/usr/bin/env python
#
# Copyright 2017 the original author or authors.
#
# Code adapted from https://github.com/choppsv1/netconf
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import structlog
from lxml import etree
import netconf.nc_common.error as ncerror
log = structlog.get_logger()
class RpcResponse():
def __init__(self, capabilities):
self.is_error = False
# if there is an error then the reply_node will contains an Error
# object
self.reply_node = None
self.close_session = False
self.capabilities = capabilities
self.custom_rpc = False
def build_xml_response(self, request, voltha_response, custom_rpc=False):
if request is None:
return
voltha_xml_string = etree.tostring(voltha_response)
# Remove the leading and trailing <yang> tags
if voltha_xml_string.startswith('<yang>'):
voltha_xml_string = voltha_xml_string[len('<yang>'):]
if voltha_xml_string.endswith('</yang>'):
voltha_xml_string = voltha_xml_string[:-len('</yang>')]
# Empty response
elif voltha_xml_string.startswith('<yang/>'):
voltha_xml_string = ''
# Replace any True/False data to true/false
voltha_xml_string = voltha_xml_string.replace('>False<', '>false<')
voltha_xml_string = voltha_xml_string.replace('>True<', '>true<')
if not custom_rpc:
            # Create the xml body
            if 'subclass' in request:
body = ''.join([
'<data>',
'<',
request['class'],
' xmlns="',
request['namespace'],
'">',
'<',
request['subclass'],
'>',
voltha_xml_string,
'</',
request['subclass'],
'>',
'</',
request['class'],
'>',
'</data>'
])
else:
body = ''.join([
'<data>',
'<',
request['class'],
' xmlns="urn:opencord:params:xml:ns:voltha:voltha">',
voltha_xml_string,
'</',
request['class'],
'>',
'</data>'
])
else: # custom_rpc
body = ''.join([
'<rpc-reply',
' xmlns="urn:opencord:params:xml:ns:voltha:voltha">',
voltha_xml_string,
'</rpc-reply>',
])
return etree.fromstring(body)
def add_node(self, new_node, tree):
if new_node.tag == 'ignore':
# We want only sub-elements
for elem in list(new_node):
tree.append(elem)
else:
tree.append(new_node)
def copy_basic_element(self, elm):
new_elem = etree.Element(elm.tag)
new_elem.text = elm.text
return new_elem
def process_inline_option(self, elem):
inline_option = False
inline_node_name = None
for elm in list(elem):
if elm.tag == 'yang_field_option':
inline_option = True
if elm.tag == 'name':
inline_node_name = elm.text
if not inline_option:
new_elem = etree.Element(elem.tag)
return new_elem, elem
# look for the node with the inline_node_name
for elm in list(elem):
if elm.tag == inline_node_name:
new_elem = etree.Element('ignore')
return new_elem, elm
def process_element(self, elem):
attrib = elem.get('type')
if (attrib == 'list'):
if list(elem) is None:
return self.copy_basic_element(elem)
new_elem = etree.Element('ignore')
for elm in list(elem):
elm.tag = elem.tag
if elm.get('type') in ['list', 'dict']:
self.add_node(self.process_element(elm), new_elem)
else:
new_elem.append(self.copy_basic_element(elm))
return new_elem
elif (attrib == 'dict'):
# Empty case
if list(elem) is None:
return self.copy_basic_element(elem)
# Process field option.
new_elem, elem = self.process_inline_option(elem)
for elm in list(elem):
if elm.get('type') in ['list', 'dict']:
self.add_node(self.process_element(elm), new_elem)
else:
new_elem.append(self.copy_basic_element(elm))
return new_elem
else:
return self.copy_basic_element(elem)
def to_yang_xml(self, from_xml, request, yang_options=None,
custom_rpc=False):
# Parse from_xml as follows:
        # 1. Any element having a list attribute should have each item moved 1
        #    level up and re-tagged with the parent tag
        # 2. Any element having a dict attribute and a <yang_field_option>
        #    sub-element should have all its items moved to the parent level
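        # e.g. the children of an <adapters type="list"> element are re-tagged
        # as <adapters> and lifted one level up, becoming sibling elements.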
top = etree.Element('yang')
elms = list(from_xml)
xml_tag = yang_options[0]
list_items_name = yang_options[1]
# Handle the special case when the the xml contains 1 element which
# is a list type
if len(elms) == 1:
item = elms[0]
if item.get('type') == 'list':
if custom_rpc: # custom rpc request
if list_items_name == 'items': # no pre-processing required
self.add_node(self.process_element(item), top)
return top
if xml_tag:
item.tag = xml_tag
else:
item.tag = 'ignore'
else:
# Default netconf operations - may end up needing
# specific parsing per request type
                if 'subclass' in request:
item.tag = request['subclass']
# remove the subclass element in request to avoid duplicate tag
del request['subclass']
elif list_items_name == 'items':
item.tag = xml_tag
else:
item.tag = 'ignore'
self.add_node(self.process_element(item), top)
return top
# Process normally for all other cases
for elm in elms:
self.add_node(self.process_element(elm), top)
return top
# Helper method to sort the xml message based on the xml tags
def sort_xml_response(self, xml):
for parent in xml.xpath('//*[./*]'): # Search for parent elements
parent[:] = sorted(parent, key=lambda x: x.tag)
return xml
# custom_rpc refers to custom RPCs different from Netconf default RPCs
# like get, get-config, edit-config, etc
def build_yang_response(self, root, request, yang_options=None,
custom_rpc=False):
try:
self.custom_rpc = custom_rpc
yang_xml = self.to_yang_xml(root, request, yang_options,
custom_rpc)
log.info('yang-xml', yang_xml=etree.tounicode(yang_xml,
pretty_print=True))
return self.build_xml_response(request, yang_xml, custom_rpc)
except Exception as e:
log.exception('error-building-yang-response', request=request,
xml=etree.tostring(root))
            self.is_error = True
            self.reply_node = ncerror.BadMsg(request)
return
| 37.849558 | 87 | 0.527823 |
aced3fcdff4eeb5a8036bd72f1da7d000e613de8 | 2,139 | py | Python | var/spack/repos/builtin/packages/datatransferkit/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/datatransferkit/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/datatransferkit/package.py | lcnzg/spack | 5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Datatransferkit(CMakePackage):
"""DataTransferKit is an open-source software library of
parallel solution transfer services for multiphysics simulations"""
homepage = "https://datatransferkit.readthedoc.io"
url = "https://github.com/ORNL-CEES/DataTransferKit/archive/3.1-rc2.tar.gz"
git = "https://github.com/ORNL-CEES/DataTransferKit.git"
tags = ['e4s']
maintainers = ['Rombur']
version('master', branch='master', submodules=True)
version('3.1-rc2', commit='1abc1a43b33dffc7a16d7497b4185d09d865e36a', submodules=True)
variant('external-arborx', default=False,
description='use an external ArborX library instead of the submodule')
variant('openmp', default=False, description='enable OpenMP backend')
variant('serial', default=True, description='enable Serial backend (default)')
variant('shared', default=True,
description='enable the build of shared lib')
depends_on('arborx@1.0:', when='+external-arborx')
depends_on('boost')
depends_on('cmake', type='build')
depends_on('trilinos+intrepid2+shards~dtk')
depends_on('trilinos+openmp', when='+openmp')
depends_on('trilinos+stratimikos+belos', when='@master')
depends_on('trilinos@13:13.99', when='@3.1-rc2')
def cmake_args(self):
spec = self.spec
from_variant = self.define_from_variant
options = [
from_variant('BUILD_SHARED_LIBS', 'shared'),
'-DDataTransferKit_ENABLE_DataTransferKit=ON',
from_variant('DataTransferKit_ENABLE_ArborX_TPL', 'external-arborx'),
'-DDataTransferKit_ENABLE_TESTS=OFF',
'-DDataTransferKit_ENABLE_EXAMPLES=OFF',
'-DCMAKE_CXX_EXTENSIONS=OFF',
'-DCMAKE_CXX_STANDARD=14',
]
if '+openmp' in spec:
options.append('-DDataTransferKit_ENABLE_OpenMP=ON')
return options
| 37.526316 | 90 | 0.682094 |
aced411b06015692f820ad1406fe2c1e42e63cb0 | 21,353 | py | Python | src/sage/combinat/designs/latin_squares.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 4 | 2020-07-17T04:49:44.000Z | 2020-07-29T06:33:51.000Z | src/sage/combinat/designs/latin_squares.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 1 | 2020-04-18T16:30:43.000Z | 2020-04-18T16:30:43.000Z | src/sage/combinat/designs/latin_squares.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Mutually Orthogonal Latin Squares (MOLS)
The main function of this module is :func:`mutually_orthogonal_latin_squares`
and can be can be used to generate MOLS (or check that they exist)::
sage: MOLS = designs.mutually_orthogonal_latin_squares(4,8)
For more information on MOLS, see the :wikipedia:`Wikipedia entry on MOLS
<Graeco-Latin_square#Mutually_orthogonal_Latin_squares>`. If you are only
interested by latin squares, see :mod:`~sage.combinat.matrices.latin`.
The functions defined here are
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`mutually_orthogonal_latin_squares` | Return `k` Mutually Orthogonal `n\times n` Latin Squares.
    :meth:`are_mutually_orthogonal_latin_squares` | Check that the matrices in the list ``l`` are MOLS.
:meth:`latin_square_product` | Return the product of two (or more) latin squares.
:meth:`MOLS_table` | Prints the MOLS table.
**Table of MOLS**
Sage can produce a table of MOLS similar to the one from the Handbook of
Combinatorial Designs [DesignHandbook]_ (`available here
<http://books.google.fr/books?id=S9FA9rq1BgoC&dq=handbook%20combinatorial%20designs%20MOLS%2010000&pg=PA176>`_).
::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(600) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
100| 8 100 6 102 7 7 6 106 6 108 6 6 13 112 6 7 6 8 6 6
120| 7 120 6 6 6 124 6 126 127 7 6 130 6 7 6 7 7 136 6 138
140| 6 7 6 10 10 7 6 7 6 148 6 150 7 8 8 7 6 156 7 6
160| 9 7 6 162 6 7 6 166 7 168 6 8 6 172 6 6 14 9 6 178
180| 6 180 6 6 7 9 6 10 6 8 6 190 7 192 6 7 6 196 6 198
200| 7 7 6 7 6 8 6 8 14 11 10 210 6 7 6 7 7 8 6 10
220| 6 12 6 222 13 8 6 226 6 228 6 7 7 232 6 7 6 7 6 238
240| 7 240 6 242 6 7 6 12 7 7 6 250 6 12 9 7 255 256 6 12
260| 6 8 8 262 7 8 7 10 7 268 7 270 15 16 6 13 10 276 6 9
280| 7 280 6 282 6 12 6 7 15 288 6 6 6 292 6 6 7 10 10 12
300| 7 7 7 7 15 15 6 306 7 7 7 310 7 312 7 10 7 316 7 10
320| 15 15 6 16 8 12 6 7 7 9 6 330 7 8 7 6 7 336 6 7
340| 6 10 10 342 7 7 6 346 6 348 8 12 18 352 6 9 7 9 6 358
360| 7 360 6 7 7 7 6 366 15 15 7 15 7 372 7 15 7 13 7 378
380| 7 12 7 382 15 15 7 15 7 388 7 16 7 7 7 7 8 396 7 7
400| 15 400 7 15 11 8 7 15 8 408 7 13 8 12 10 9 18 15 7 418
420| 7 420 7 15 7 16 6 7 7 7 6 430 15 432 6 15 6 18 7 438
440| 7 15 7 442 7 13 7 11 15 448 7 15 7 7 7 15 7 456 7 16
460| 7 460 7 462 15 15 7 466 8 8 7 15 7 15 10 18 7 15 6 478
480| 15 15 6 15 8 7 6 486 7 15 6 490 6 16 6 7 15 15 6 498
500| 7 8 9 502 7 15 6 15 7 508 6 15 511 18 7 15 8 12 8 15
520| 8 520 10 522 12 15 8 16 15 528 7 15 8 12 7 15 8 15 10 15
540| 12 540 7 15 18 7 7 546 7 8 7 18 7 7 7 7 7 556 7 12
560| 15 7 7 562 7 7 6 7 7 568 6 570 7 7 15 22 8 576 7 7
580| 7 8 7 10 7 8 7 586 7 18 17 7 15 592 8 15 7 7 8 598
Comparison with the results from the Handbook of Combinatorial Designs (2ed)
[DesignHandbook]_::
sage: MOLS_table(600,compare=True) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
100|
120|
140|
160|
180|
200| -
220|
240|
260|
280|
300|
320| -
340|
360| - -
380| -
400|
420| -
440|
460|
480|
500| -
520|
540|
560|
580|
.. TODO::
Look at [ColDin01]_.
REFERENCES:
.. [Stinson2004] Douglas R. Stinson,
*Combinatorial designs: construction and analysis*,
Springer, 2004.
.. [ColDin01] Charles Colbourn, Jeffrey Dinitz,
*Mutually orthogonal latin squares: a brief survey of constructions*,
Volume 95, Issues 1-2, Pages 9-48,
Journal of Statistical Planning and Inference,
Springer, 1 May 2001.
Functions
---------
"""
from __future__ import print_function, absolute_import
from sage.rings.integer import Integer
from sage.categories.sets_cat import EmptySetError
from sage.misc.unknown import Unknown
from sage.env import COMBINATORIAL_DESIGN_DATA_DIR
def are_mutually_orthogonal_latin_squares(l, verbose=False):
r"""
    Check whether the list ``l`` of matrices forms a set of mutually
    orthogonal latin squares.

    INPUT:

    - ``l`` -- a list of matrices

    - ``verbose`` -- if ``True``, print why the list of matrices provided
      does not form a set of mutually orthogonal latin squares
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import are_mutually_orthogonal_latin_squares
sage: m1 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: m3 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
True
sage: are_mutually_orthogonal_latin_squares([m1,m3])
False
sage: are_mutually_orthogonal_latin_squares([m2,m3])
True
sage: are_mutually_orthogonal_latin_squares([m1,m2,m3], verbose=True)
Squares 0 and 2 are not orthogonal
False
sage: m = designs.mutually_orthogonal_latin_squares(7,8)
sage: are_mutually_orthogonal_latin_squares(m)
True
TESTS:
Not a latin square::
sage: m1 = matrix([[0,1,0],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not row latin
False
sage: m1 = matrix([[0,1,2],[1,0,2],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not column latin
False
sage: m1 = matrix([[0,0,0],[1,1,1],[2,2,2]])
sage: m2 = matrix([[0,1,2],[0,1,2],[0,1,2]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
False
"""
if not l:
raise ValueError("the list must be non empty")
n = l[0].ncols()
k = len(l)
if any(M.ncols() != n or M.nrows() != n for M in l):
if verbose:
print("Not all matrices are square matrices of the same dimensions")
return False
# Check that all matrices are latin squares
for i,M in enumerate(l):
if any(len(set(R)) != n for R in M):
if verbose:
print("Matrix {} is not row latin".format(i))
return False
if any(len(set(R)) != n for R in zip(*M)):
if verbose:
print("Matrix {} is not column latin".format(i))
return False
from .designs_pyx import is_orthogonal_array
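    # Reshaping sketch (illustrative): each square is flattened row-major,
    # and zip(*...) turns the k flattened squares into n^2 rows of length k,
    # one row per cell, listing the symbol each square places in that cell;
    # is_orthogonal_array then checks the MOLS property on those rows.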
return is_orthogonal_array(list(zip(*[[x for R in M for x in R] for M in l])),k,n, verbose=verbose, terminology="MOLS")
def mutually_orthogonal_latin_squares(k, n, partitions=False, check=True):
r"""
Return `k` Mutually Orthogonal `n\times n` Latin Squares (MOLS).
For more information on Mutually Orthogonal Latin Squares, see
:mod:`~sage.combinat.designs.latin_squares`.
INPUT:
- ``k`` (integer) -- number of MOLS. If ``k=None`` it is set to the largest
value available.
- ``n`` (integer) -- size of the latin square.
- ``partitions`` (boolean) -- a Latin Square can be seen as 3 partitions of
the `n^2` cells of the array into `n` sets of size `n`, respectively:
* The partition of rows
* The partition of columns
* The partition of number (cells numbered with 0, cells numbered with 1,
...)
These partitions have the additional property that any two sets from
different partitions intersect on exactly one element.
      When ``partitions`` is set to ``True``, this function returns a list of
      `k+2` partitions satisfying this intersection property instead of the
      `k` MOLS (though the data is exactly the same in both cases).
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
EXAMPLES::
sage: designs.mutually_orthogonal_latin_squares(4,5)
[
[0 2 4 1 3] [0 3 1 4 2] [0 4 3 2 1] [0 1 2 3 4]
[4 1 3 0 2] [3 1 4 2 0] [2 1 0 4 3] [4 0 1 2 3]
[3 0 2 4 1] [1 4 2 0 3] [4 3 2 1 0] [3 4 0 1 2]
[2 4 1 3 0] [4 2 0 3 1] [1 0 4 3 2] [2 3 4 0 1]
[1 3 0 2 4], [2 0 3 1 4], [3 2 1 0 4], [1 2 3 4 0]
]
sage: designs.mutually_orthogonal_latin_squares(3,7)
[
[0 2 4 6 1 3 5] [0 3 6 2 5 1 4] [0 4 1 5 2 6 3]
[6 1 3 5 0 2 4] [5 1 4 0 3 6 2] [4 1 5 2 6 3 0]
[5 0 2 4 6 1 3] [3 6 2 5 1 4 0] [1 5 2 6 3 0 4]
[4 6 1 3 5 0 2] [1 4 0 3 6 2 5] [5 2 6 3 0 4 1]
[3 5 0 2 4 6 1] [6 2 5 1 4 0 3] [2 6 3 0 4 1 5]
[2 4 6 1 3 5 0] [4 0 3 6 2 5 1] [6 3 0 4 1 5 2]
[1 3 5 0 2 4 6], [2 5 1 4 0 3 6], [3 0 4 1 5 2 6]
]
sage: designs.mutually_orthogonal_latin_squares(2,5,partitions=True)
[[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]],
[[0, 5, 10, 15, 20],
[1, 6, 11, 16, 21],
[2, 7, 12, 17, 22],
[3, 8, 13, 18, 23],
[4, 9, 14, 19, 24]],
[[0, 8, 11, 19, 22],
[3, 6, 14, 17, 20],
[1, 9, 12, 15, 23],
[4, 7, 10, 18, 21],
[2, 5, 13, 16, 24]],
[[0, 9, 13, 17, 21],
[2, 6, 10, 19, 23],
[4, 8, 12, 16, 20],
[1, 5, 14, 18, 22],
[3, 7, 11, 15, 24]]]
What is the maximum number of MOLS of size 8 that Sage knows how to build?::
sage: designs.orthogonal_arrays.largest_available_k(8)-2
7
If you only want to know if Sage is able to build a given set of
MOLS, query the ``orthogonal_arrays.*`` functions::
sage: designs.orthogonal_arrays.is_available(5+2, 5) # 5 MOLS of order 5
False
sage: designs.orthogonal_arrays.is_available(4+2,6) # 4 MOLS of order 6
False
    Sage, however, is not able to prove that the second set of MOLS does not
    exist::
sage: designs.orthogonal_arrays.exists(4+2,6) # 4 MOLS of order 6
Unknown
    If you ask for such MOLS, you will respectively get an informative
    ``EmptySetError`` or ``NotImplementedError``::
sage: designs.mutually_orthogonal_latin_squares(5, 5)
Traceback (most recent call last):
...
EmptySetError: There exist at most n-1 MOLS of size n if n>=2.
sage: designs.mutually_orthogonal_latin_squares(4,6)
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build 4 MOLS of order 6
TESTS:
The special case `n=1`::
sage: designs.mutually_orthogonal_latin_squares(3, 1)
[[0], [0], [0]]
Wrong input for `k`::
sage: designs.mutually_orthogonal_latin_squares(None, 1)
Traceback (most recent call last):
...
TypeError: k must be a positive integer
sage: designs.mutually_orthogonal_latin_squares(-1, 1)
Traceback (most recent call last):
...
ValueError: k must be positive
sage: designs.mutually_orthogonal_latin_squares(2,10)
[
[1 8 9 0 2 4 6 3 5 7] [1 7 6 5 0 9 8 2 3 4]
[7 2 8 9 0 3 5 4 6 1] [8 2 1 7 6 0 9 3 4 5]
[6 1 3 8 9 0 4 5 7 2] [9 8 3 2 1 7 0 4 5 6]
[5 7 2 4 8 9 0 6 1 3] [0 9 8 4 3 2 1 5 6 7]
[0 6 1 3 5 8 9 7 2 4] [2 0 9 8 5 4 3 6 7 1]
[9 0 7 2 4 6 8 1 3 5] [4 3 0 9 8 6 5 7 1 2]
[8 9 0 1 3 5 7 2 4 6] [6 5 4 0 9 8 7 1 2 3]
[2 3 4 5 6 7 1 8 9 0] [3 4 5 6 7 1 2 8 0 9]
[3 4 5 6 7 1 2 0 8 9] [5 6 7 1 2 3 4 0 9 8]
[4 5 6 7 1 2 3 9 0 8], [7 1 2 3 4 5 6 9 8 0]
]
"""
from sage.combinat.designs.orthogonal_arrays import orthogonal_array
from sage.matrix.constructor import Matrix
from .database import MOLS_constructions
if k is None:
raise TypeError('k must be a positive integer')
    Integer(k)  # raises TypeError if k cannot be converted to an integer
if k < 0:
raise ValueError('k must be positive')
if n == 1:
matrices = [Matrix([[0]])] * k
elif k >= n:
raise EmptySetError("There exist at most n-1 MOLS of size n if n>=2.")
elif n in MOLS_constructions and k <= MOLS_constructions[n][0]:
_, construction = MOLS_constructions[n]
matrices = construction()[:k]
elif orthogonal_array(k + 2, n, existence=True) is not Unknown:
        # Forwarding non-existence results
        if not orthogonal_array(k + 2, n, existence=True):
            raise EmptySetError("There does not exist {} MOLS of order {}!".format(k, n))
# make sure that the first two columns are "11, 12, ..., 1n, 21, 22, ..."
OA = sorted(orthogonal_array(k + 2, n, check=False))
# We first define matrices as lists of n^2 values
matrices = [[] for _ in range(k)]
for L in OA:
for i in range(2, k + 2):
matrices[i-2].append(L[i])
# The real matrices
matrices = [[M[i*n:(i+1)*n] for i in range(n)] for M in matrices]
matrices = [Matrix(M) for M in matrices]
else:
raise NotImplementedError("I don't know how to build {} MOLS of order {}".format(k, n))
if check:
assert are_mutually_orthogonal_latin_squares(matrices)
# partitions have been requested but have not been computed yet
if partitions is True:
partitions = [[[i*n+j for j in range(n)] for i in range(n)],
[[j*n+i for j in range(n)] for i in range(n)]]
for m in matrices:
partition = [[] for i in range(n)]
for i in range(n):
for j in range(n):
partition[m[i,j]].append(i*n+j)
partitions.append(partition)
if partitions:
return partitions
else:
return matrices
def latin_square_product(M, N, *others):
r"""
Return the product of two (or more) latin squares.
Given two Latin Squares `M,N` of respective sizes `m,n`, the direct product
`M\times N` of size `mn` is defined by `(M\times
N)((i_1,i_2),(j_1,j_2))=(M(i_1,j_1),N(i_2,j_2))` where `i_1,j_1\in [m],
i_2,j_2\in [n]`
Each pair of values `(i,j)\in [m]\times [n]` is then relabeled to `in+j`.
This is Lemma 6.25 of [Stinson2004]_.
INPUT:
    An arbitrary number (at least two) of latin squares.
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import latin_square_product
sage: m=designs.mutually_orthogonal_latin_squares(3,4)[0]
sage: latin_square_product(m,m,m)
64 x 64 sparse matrix over Integer Ring (use the '.str()' method to see the entries)
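
        As an illustrative size check (reusing the ``m`` defined above), the
        product of two squares of order `m` has order `m^2`::

            sage: latin_square_product(m, m).nrows() == m.nrows()^2
            True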
"""
from sage.matrix.constructor import Matrix
m = M.nrows()
n = N.nrows()
D = {((i,j),(ii,jj)):(M[i,ii],N[j,jj])
for i in range(m)
for ii in range(m)
for j in range(n)
for jj in range(n)}
L = lambda i_j: i_j[0] * n + i_j[1]
D = {(L(c[0]), L(c[1])): L(v) for c, v in D.items()}
P = Matrix(D)
if others:
return latin_square_product(P, others[0],*others[1:])
else:
return P
def MOLS_table(start,stop=None,compare=False,width=None):
r"""
    Print the MOLS table that Sage can produce.
INPUT:
    - ``start,stop`` (integers) -- print the table of MOLS for values of `n`
      such that ``start<=n<stop``. If only one integer is given as input, it
      is interpreted as the value of ``stop`` with ``start=0`` (same
      behaviour as ``range``).

    - ``compare`` (boolean) -- if set to ``True``, the table displays with
      `+` and `-` entries its differences with the table from the Handbook
      of Combinatorial Designs (2ed).

    - ``width`` (integer) -- the width of each column of the table. By
      default, it is computed from the range of values determined by the
      parameters ``start`` and ``stop``.
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(100)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, width=4)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
____________________________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
sage: MOLS_table(50, 100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
40|
60| +
80|
"""
from .orthogonal_arrays import largest_available_k
if stop is None:
start,stop = 0,start
    # round start down and stop up to the nearest multiples of 20
start = start - (start%20)
stop = stop-1
stop = stop + (20-(stop%20))
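    # e.g. (illustrative): start=55, stop=187 become start=40, stop=200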
assert start%20 == 0 and stop%20 == 0
if stop <= start:
return
if compare:
        with open("{}/MOLS_table.txt".format(
                COMBINATORIAL_DESIGN_DATA_DIR)) as handbook_file:
            hb = [int(_) for _ in handbook_file.readlines()[9].split(',')]
# choose an appropriate width (needs to be >= 3 because "+oo" should fit)
if width is None:
width = max(3, Integer(stop-1).ndigits(10))
print(" " * (width + 2) + " ".join("{i:>{width}}".format(i=i,width=width)
for i in range(20)))
print(" " * (width + 1) + "_" * ((width + 1) * 20), end="")
for i in range(start,stop):
if i % 20 == 0:
print("\n{:>{width}}|".format(i, width=width), end="")
k = largest_available_k(i)-2
if compare:
if i < 2 or hb[i] == k:
c = ""
elif hb[i] < k:
c = "+"
else:
c = "-"
else:
if i < 2:
c = "+oo"
else:
c = k
print(' {:>{width}}'.format(c, width=width), end="")
| 38.682971 | 123 | 0.53023 |
aced41437da2392384646e480be6673782cf2013 | 1,834 | py | Python | docs/source/conf.py | rzaharia/AppCUI | 9aa37b154e04d0aa3e69a75798a698f591b0c8ca | ["MIT"] | null | null | null | docs/source/conf.py | rzaharia/AppCUI | 9aa37b154e04d0aa3e69a75798a698f591b0c8ca | ["MIT"] | null | null | null | docs/source/conf.py | rzaharia/AppCUI | 9aa37b154e04d0aa3e69a75798a698f591b0c8ca | ["MIT"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'AppCUI'
copyright = '2021, Gavriluț Dragoș'
author = 'Gavriluț Dragoș'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 35.269231 | 79 | 0.663577 |
aced41ecf009cabf7d1edb10a05569ee7827bc13 | 51,959 | py | Python | course/views.py | inducer/courseflow | 0f9786e3616dbedf08365d81a731f672b97ba9f5 | ["Unlicense"] | null | null | null | course/views.py | inducer/courseflow | 0f9786e3616dbedf08365d81a731f672b97ba9f5 | ["Unlicense"] | null | null | null | course/views.py | inducer/courseflow | 0f9786e3616dbedf08365d81a731f672b97ba9f5 | ["Unlicense"] | null | null | null |
from __future__ import annotations
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from typing import cast, List, Text
import datetime
from django.shortcuts import ( # noqa
render, get_object_or_404, redirect)
from django.contrib import messages # noqa
from django.core.exceptions import (
PermissionDenied, ObjectDoesNotExist, SuspiciousOperation)
import django.forms as forms
import django.views.decorators.http as http_dec
from django import http
from django.utils.safestring import mark_safe
from django.db import transaction
from django.utils.translation import (
gettext_lazy as _,
gettext,
pgettext,
pgettext_lazy,
)
from django.utils.functional import lazy
from django.contrib.auth.decorators import login_required
from django_select2.forms import Select2Widget
mark_safe_lazy = lazy(mark_safe, str)
from django.views.decorators.cache import cache_control
from crispy_forms.layout import Submit, Layout, Div
from relate.utils import StyledForm, StyledModelForm, string_concat
from bootstrap_datepicker_plus.widgets import DateTimePickerInput
from course.auth import get_pre_impersonation_user
from course.enrollment import (
get_participation_for_request,
get_participation_permissions)
from course.constants import (
participation_permission as pperm,
participation_status,
FLOW_PERMISSION_CHOICES,
flow_rule_kind, FLOW_RULE_KIND_CHOICES
)
from course.models import (
Course,
InstantFlowRequest,
Participation,
FlowSession,
FlowRuleException)
from course.content import get_course_repo
from course.utils import ( # noqa
course_view,
render_course_page,
CoursePageContext,
get_course_specific_language_choices)
# {{{ for mypy
from typing import Tuple, Text, Any, Iterable, Dict, Optional, TYPE_CHECKING # noqa
if TYPE_CHECKING:
from course.content import ( # noqa
FlowDesc,
)
from accounts.models import User # noqa
# }}}
NONE_SESSION_TAG = string_concat("<<<", _("NONE"), ">>>") # noqa
# {{{ home
def home(request: http.HttpRequest) -> http.HttpResponse:
now_datetime = get_now_or_fake_time(request)
current_courses = []
past_courses = []
for course in Course.objects.filter(listed=True):
participation = get_participation_for_request(request, course)
show = True
if course.hidden:
perms = get_participation_permissions(course, participation)
if (pperm.view_hidden_course_page, None) not in perms:
show = False
if show:
if (course.end_date is None
or now_datetime.date() <= course.end_date):
current_courses.append(course)
else:
past_courses.append(course)
def course_sort_key_minor(course):
return course.number if course.number is not None else ""
def course_sort_key_major(course):
return (course.start_date
if course.start_date is not None else now_datetime.date())
current_courses.sort(key=course_sort_key_minor)
past_courses.sort(key=course_sort_key_minor)
current_courses.sort(key=course_sort_key_major, reverse=True)
past_courses.sort(key=course_sort_key_major, reverse=True)
return render(request, "course/home.html", {
"current_courses": current_courses,
"past_courses": past_courses,
})
# }}}
# {{{ pages
def check_course_state(
course: Course, participation: Participation | None) -> None:
"""
Check to see if the course is hidden.
    If hidden, only allow access to participations with the
    'view_hidden_course_page' permission.
"""
if course.hidden:
if participation is None:
raise PermissionDenied(_("course page is currently hidden"))
if not participation.has_permission(pperm.view_hidden_course_page):
raise PermissionDenied(_("course page is currently hidden"))
@course_view
def course_page(pctx: CoursePageContext) -> http.HttpResponse:
from course.content import get_processed_page_chunks, get_course_desc
page_desc = get_course_desc(pctx.repo, pctx.course, pctx.course_commit_sha)
chunks = get_processed_page_chunks(
pctx.course, pctx.repo, pctx.course_commit_sha, page_desc,
pctx.role_identifiers(), get_now_or_fake_time(pctx.request),
facilities=pctx.request.relate_facilities)
show_enroll_button = (
pctx.course.accepts_enrollment
and pctx.participation is None)
if pctx.request.user.is_authenticated and Participation.objects.filter(
user=pctx.request.user,
course=pctx.course,
status=participation_status.requested).count():
show_enroll_button = False
messages.add_message(pctx.request, messages.INFO,
_("Your enrollment request is pending. You will be "
"notified once it has been acted upon."))
from course.models import ParticipationPreapproval
if ParticipationPreapproval.objects.filter(
course=pctx.course).exclude(institutional_id=None).count():
if not pctx.request.user.institutional_id:
from django.urls import reverse
messages.add_message(pctx.request, messages.WARNING,
_("This course uses institutional ID for "
"enrollment preapproval, please <a href='%s' "
"role='button' class='btn btn-md btn-primary'>"
"fill in your institutional ID »"
"</a> in your profile.")
% (
reverse("relate-user_profile")
+ "?referer="
+ pctx.request.path
+ "&set_inst_id=1"
)
)
else:
if pctx.course.preapproval_require_verified_inst_id:
messages.add_message(pctx.request, messages.WARNING,
_("Your institutional ID is not verified or "
"preapproved. Please contact your course "
"staff.")
)
return render_course_page(pctx, "course/course-page.html", {
"chunks": chunks,
"show_enroll_button": show_enroll_button,
})
@course_view
def static_page(pctx: CoursePageContext, page_path: str) -> http.HttpResponse:
from course.content import get_staticpage_desc, get_processed_page_chunks
try:
page_desc = get_staticpage_desc(pctx.repo, pctx.course,
pctx.course_commit_sha, "staticpages/"+page_path+".yml")
except ObjectDoesNotExist:
raise http.Http404()
chunks = get_processed_page_chunks(
pctx.course, pctx.repo, pctx.course_commit_sha, page_desc,
pctx.role_identifiers(), get_now_or_fake_time(pctx.request),
facilities=pctx.request.relate_facilities)
return render_course_page(pctx, "course/static-page.html", {
"chunks": chunks,
"show_enroll_button": False,
})
# }}}
# {{{ media
def media_etag_func(request, course_identifier, commit_sha, media_path):
return ":".join([course_identifier, commit_sha, media_path])
@cache_control(max_age=3600*24*31) # cache for a month
@http_dec.condition(etag_func=media_etag_func)
def get_media(request, course_identifier, commit_sha, media_path):
course = get_object_or_404(Course, identifier=course_identifier)
with get_course_repo(course) as repo:
return get_repo_file_response(
repo, "media/" + media_path, commit_sha.encode())
def repo_file_etag_func(request, course_identifier, commit_sha, path):
return ":".join([course_identifier, commit_sha, path])
@cache_control(max_age=3600*24*31) # cache for a month
@http_dec.condition(etag_func=repo_file_etag_func)
def get_repo_file(request, course_identifier, commit_sha, path):
commit_sha = commit_sha.encode()
course = get_object_or_404(Course, identifier=course_identifier)
participation = get_participation_for_request(request, course)
return get_repo_file_backend(
request, course, participation, commit_sha, path)
def current_repo_file_etag_func(
request: http.HttpRequest, course_identifier: str, path: str) -> str:
course = get_object_or_404(Course, identifier=course_identifier)
participation = get_participation_for_request(request, course)
check_course_state(course, participation)
from course.content import get_course_commit_sha
commit_sha = get_course_commit_sha(course, participation)
return ":".join([course_identifier, commit_sha.decode(), path])
@http_dec.condition(etag_func=current_repo_file_etag_func)
def get_current_repo_file(
request: http.HttpRequest, course_identifier: str, path: str
) -> http.HttpResponse:
course = get_object_or_404(Course, identifier=course_identifier)
participation = get_participation_for_request(request, course)
from course.content import get_course_commit_sha
commit_sha = get_course_commit_sha(course, participation)
return get_repo_file_backend(
request, course, participation, commit_sha, path)
def get_repo_file_backend(
request: http.HttpRequest,
course: Course,
participation: Participation | None,
commit_sha: bytes,
path: str,
) -> http.HttpResponse:
# noqa
"""
    Check whether a file should be accessible, and serve it if the
    permission is not denied.
    Order is important here: an in-exam request takes precedence.
    Note: an access kind of "public" is equivalent to "unenrolled".
"""
# check to see if the course is hidden
check_course_state(course, participation)
# set access to public (or unenrolled), student, etc
if request.relate_exam_lockdown:
access_kinds = ["in_exam"]
else:
from course.enrollment import get_participation_permissions
access_kinds = [
arg
for perm, arg in get_participation_permissions(course, participation)
if perm == pperm.access_files_for
and arg is not None]
from course.content import is_repo_file_accessible_as
# retrieve local path for the repo for the course
with get_course_repo(course) as repo:
if not is_repo_file_accessible_as(access_kinds, repo, commit_sha, path):
raise PermissionDenied()
return get_repo_file_response(repo, path, commit_sha)
def get_repo_file_response(
repo: Any, path: str, commit_sha: bytes
) -> http.HttpResponse:
from course.content import get_repo_blob_data_cached
try:
data = get_repo_blob_data_cached(repo, path, commit_sha)
except ObjectDoesNotExist:
raise http.Http404()
from mimetypes import guess_type
content_type, __ = guess_type(path)
if content_type is None:
content_type = "application/octet-stream"
return http.HttpResponse(data, content_type=content_type)
# }}}
# {{{ time travel
class FakeTimeForm(StyledForm):
time = forms.DateTimeField(
widget=DateTimePickerInput(
options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}),
label=_("Time"))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper.add_input(
# Translators: "set" fake time.
Submit("set", _("Set")))
self.helper.add_input(
# Translators: "unset" fake time.
Submit("unset", _("Unset")))
def get_fake_time(request: http.HttpRequest) -> datetime.datetime | None:
if request is not None and "relate_fake_time" in request.session:
from django.conf import settings
from pytz import timezone
tz = timezone(settings.TIME_ZONE)
return tz.localize( # type: ignore
datetime.datetime.fromtimestamp(
request.session["relate_fake_time"]))
else:
return None
def get_now_or_fake_time(request: http.HttpRequest) -> datetime.datetime:
fake_time = get_fake_time(request)
if fake_time is None:
from django.utils.timezone import now
return now()
else:
return fake_time
def may_set_fake_time(user: User | None) -> bool:
if user is None:
return False
return Participation.objects.filter(
user=user,
roles__permissions__permission=pperm.set_fake_time
).count() > 0
@login_required
def set_fake_time(request):
# allow staff to set fake time when impersonating
pre_imp_user = get_pre_impersonation_user(request)
if not (
may_set_fake_time(request.user) or (
pre_imp_user is not None
and may_set_fake_time(pre_imp_user))):
raise PermissionDenied(_("may not set fake time"))
if request.method == "POST":
form = FakeTimeForm(request.POST, request.FILES)
do_set = "set" in form.data
if form.is_valid():
fake_time = form.cleaned_data["time"]
if do_set:
import time
request.session["relate_fake_time"] = \
time.mktime(fake_time.timetuple())
else:
request.session.pop("relate_fake_time", None)
else:
if "relate_fake_time" in request.session:
form = FakeTimeForm({
"time": get_fake_time(request)
})
else:
form = FakeTimeForm()
return render(request, "generic-form.html", {
"form": form,
"form_description": _("Set fake time"),
})
def fake_time_context_processor(request):
return {
"fake_time": get_fake_time(request),
}
# }}}
# {{{ space travel (i.e. pretend to be in facility)
class FakeFacilityForm(StyledForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from course.utils import get_facilities_config
self.fields["facilities"] = forms.MultipleChoiceField(
choices=(
(name, name)
for name in get_facilities_config()),
widget=forms.CheckboxSelectMultiple,
required=False,
label=_("Facilities"),
help_text=_("Facilities you wish to pretend to be in"))
self.fields["custom_facilities"] = forms.CharField(
label=_("Custom facilities"),
required=False,
help_text=_("More (non-predefined) facility names, separated "
"by commas, which would like to pretend to be in"))
self.fields["add_pretend_facilities_header"] = forms.BooleanField(
required=False,
initial=True,
label=_("Add fake facililities header"),
help_text=_("Add a page header to every page rendered "
"while pretending to be in a facility, as a reminder "
"that this pretending is in progress."))
self.helper.add_input(
# Translators: "set" fake facility.
Submit("set", _("Set")))
self.helper.add_input(
# Translators: "unset" fake facility.
Submit("unset", _("Unset")))
def may_set_pretend_facility(user: User | None) -> bool:
if user is None:
return False
return Participation.objects.filter(
user=user,
roles__permissions__permission=pperm.set_pretend_facility
).count() > 0
@login_required
def set_pretend_facilities(request):
    # allow staff to set pretend facilities when impersonating
pre_imp_user = get_pre_impersonation_user(request)
if not (
may_set_pretend_facility(request.user) or (
pre_imp_user is not None
and may_set_pretend_facility(pre_imp_user))):
raise PermissionDenied(_("may not pretend facilities"))
if request.method == "POST":
form = FakeFacilityForm(request.POST)
do_set = "set" in form.data
if form.is_valid():
if do_set:
pretend_facilities = (
form.cleaned_data["facilities"]
+ [s.strip()
for s in (
form.cleaned_data["custom_facilities"].split(","))
if s.strip()])
request.session["relate_pretend_facilities"] = pretend_facilities
request.session["relate_pretend_facilities_header"] = \
form.cleaned_data["add_pretend_facilities_header"]
else:
request.session.pop("relate_pretend_facilities", None)
else:
if "relate_pretend_facilities" in request.session:
form = FakeFacilityForm({
"facilities": [],
"custom_facilities": ",".join(
request.session["relate_pretend_facilities"]),
"add_pretend_facilities_header":
request.session["relate_pretend_facilities_header"],
})
else:
form = FakeFacilityForm()
return render(request, "generic-form.html", {
"form": form,
"form_description": _("Pretend to be in Facilities"),
})
def pretend_facilities_context_processor(request):
return {
"pretend_facilities": request.session.get(
"relate_pretend_facilities", []),
"add_pretend_facilities_header":
request.session.get("relate_pretend_facilities_header", True),
}
# }}}
# {{{ instant flow requests
class InstantFlowRequestForm(StyledForm):
def __init__(self, flow_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["flow_id"] = forms.ChoiceField(
choices=[(fid, fid) for fid in flow_ids],
required=True,
label=_("Flow ID"),
widget=Select2Widget())
self.fields["duration_in_minutes"] = forms.IntegerField(
required=True, initial=20,
label=pgettext_lazy("Duration for instant flow",
"Duration in minutes"))
self.helper.add_input(
Submit(
"add",
pgettext("Add an instant flow", "Add")))
self.helper.add_input(
Submit(
"cancel",
pgettext("Cancel all instant flow(s)", "Cancel all")))
@course_view
def manage_instant_flow_requests(pctx):
if not pctx.has_permission(pperm.manage_instant_flow_requests):
raise PermissionDenied()
from course.content import list_flow_ids
flow_ids = list_flow_ids(pctx.repo, pctx.course_commit_sha)
request = pctx.request
if request.method == "POST":
form = InstantFlowRequestForm(flow_ids, request.POST, request.FILES)
if "add" in request.POST:
op = "add"
elif "cancel" in request.POST:
op = "cancel"
else:
raise SuspiciousOperation(_("invalid operation"))
now_datetime = get_now_or_fake_time(pctx.request)
if form.is_valid():
if op == "add":
from datetime import timedelta
ifr = InstantFlowRequest()
ifr.course = pctx.course
ifr.flow_id = form.cleaned_data["flow_id"]
ifr.start_time = now_datetime
ifr.end_time = (
now_datetime + timedelta(
minutes=form.cleaned_data["duration_in_minutes"]))
ifr.save()
else:
assert op == "cancel"
(InstantFlowRequest.objects
.filter(
course=pctx.course,
start_time__lte=now_datetime,
end_time__gte=now_datetime,
cancelled=False)
.order_by("start_time")
.update(cancelled=True))
else:
form = InstantFlowRequestForm(flow_ids)
return render_course_page(pctx, "course/generic-course-form.html", {
"form": form,
"form_description": _("Manage Instant Flow Requests"),
})
# }}}
# {{{ test flow
class FlowTestForm(StyledForm):
def __init__(self, flow_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["flow_id"] = forms.ChoiceField(
choices=[(fid, fid) for fid in flow_ids],
required=True,
label=_("Flow ID"),
widget=Select2Widget())
self.helper.add_input(
Submit(
"test",
mark_safe_lazy(
string_concat(
pgettext("Start an activity", "Go"),
" »")),
))
@course_view
def test_flow(pctx):
if not pctx.has_permission(pperm.test_flow):
raise PermissionDenied()
from course.content import list_flow_ids
flow_ids = list_flow_ids(pctx.repo, pctx.course_commit_sha)
request = pctx.request
if request.method == "POST":
form = FlowTestForm(flow_ids, request.POST, request.FILES)
if "test" not in request.POST:
raise SuspiciousOperation(_("invalid operation"))
if form.is_valid():
return redirect("relate-view_start_flow",
pctx.course.identifier,
form.cleaned_data["flow_id"])
else:
form = FlowTestForm(flow_ids)
return render_course_page(pctx, "course/generic-course-form.html", {
"form": form,
"form_description": _("Test Flow"),
})
# }}}
# {{{ flow access exceptions
class ParticipationChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
user = obj.user
return (
"%(user_email)s - %(user_fullname)s"
% {
"user_email": user.email,
"user_fullname": user.get_full_name()
})
class ExceptionStage1Form(StyledForm):
def __init__(self, course, flow_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["participation"] = ParticipationChoiceField(
queryset=(Participation.objects
.filter(
course=course,
status=participation_status.active,
)
.order_by("user__last_name")),
required=True,
help_text=_("Select participant for whom exception is to "
"be granted."),
label=_("Participant"),
widget=Select2Widget())
self.fields["flow_id"] = forms.ChoiceField(
choices=[(fid, fid) for fid in flow_ids],
required=True,
label=_("Flow ID"))
self.helper.add_input(
Submit(
"next",
mark_safe_lazy(
string_concat(
pgettext("Next step", "Next"),
" »"))))
@course_view
def grant_exception(pctx):
if not pctx.has_permission(pperm.grant_exception):
raise PermissionDenied(_("may not grant exceptions"))
from course.content import list_flow_ids
flow_ids = list_flow_ids(pctx.repo, pctx.course_commit_sha)
request = pctx.request
if request.method == "POST":
form = ExceptionStage1Form(pctx.course, flow_ids, request.POST)
if form.is_valid():
return redirect("relate-grant_exception_stage_2",
pctx.course.identifier,
form.cleaned_data["participation"].id,
form.cleaned_data["flow_id"])
else:
form = ExceptionStage1Form(pctx.course, flow_ids)
return render_course_page(pctx, "course/generic-course-form.html", {
"form": form,
"form_description": _("Grant Exception"),
})
def strify_session_for_exception(session: FlowSession) -> str:
from relate.utils import as_local_time, format_datetime_local
# Translators: %s is the string of the start time of a session.
result = (_("started at %s") % format_datetime_local(
as_local_time(session.start_time)))
if session.access_rules_tag:
result += _(" tagged '%s'") % session.access_rules_tag
return result
class CreateSessionForm(StyledForm):
def __init__(
self,
session_tag_choices: list[tuple[str, str]],
default_tag: str | None,
create_session_is_override: bool,
*args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.fields["access_rules_tag_for_new_session"] = forms.ChoiceField(
choices=session_tag_choices,
initial=default_tag,
help_text=_("If you click 'Create session', this tag will be "
"applied to the new session."),
label=_("Access rules tag for new session"))
if create_session_is_override:
self.helper.add_input(
Submit(
"create_session",
_("Create session (override rules)")))
else:
self.helper.add_input(
Submit(
"create_session",
_("Create session")))
class ExceptionStage2Form(StyledForm):
def __init__(
self, sessions: list[FlowSession], *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.fields["session"] = forms.ChoiceField(
choices=(
(session.id, strify_session_for_exception(session))
for session in sessions),
help_text=_("The rules that currently apply to selected "
"session will provide the default values for the rules "
"on the next page."),
label=_("Session"))
self.helper.add_input(
Submit(
"next",
mark_safe_lazy(
string_concat(
pgettext("Next step", "Next"),
" »"))))
@course_view
def grant_exception_stage_2(
pctx: CoursePageContext, participation_id: str, flow_id: str
) -> http.HttpResponse:
if not pctx.has_permission(pperm.grant_exception):
raise PermissionDenied(_("may not grant exceptions"))
# {{{ get flow data
participation = get_object_or_404(Participation, id=participation_id)
form_text = (
string_concat(
"<div class='well'>",
_("Granting exception to '%(participation)s' for "
"'%(flow_id)s'."),
"</div>")
% {
"participation": participation,
"flow_id": flow_id})
from course.content import get_flow_desc
try:
flow_desc = get_flow_desc(pctx.repo, pctx.course, flow_id,
pctx.course_commit_sha)
except ObjectDoesNotExist:
raise http.Http404()
now_datetime = get_now_or_fake_time(pctx.request)
if hasattr(flow_desc, "rules"):
access_rules_tags = getattr(flow_desc.rules, "tags", [])
else:
access_rules_tags = []
from course.utils import get_session_start_rule
session_start_rule = get_session_start_rule(pctx.course, participation,
flow_id, flow_desc, now_datetime)
create_session_is_override = False
if not session_start_rule.may_start_new_session:
create_session_is_override = True
form_text += ("<div class='alert alert-info'>%s</div>" % (
string_concat(
"<i class='fa fa-info-circle'></i> ",
_("Creating a new session is (technically) not allowed "
"by course rules. Clicking 'Create Session' anyway will "
"override this rule."))))
session_tag_choices = [
(tag, tag)
for tag in access_rules_tags] + [(NONE_SESSION_TAG, NONE_SESSION_TAG)]
default_tag = session_start_rule.tag_session
if default_tag is None:
default_tag = NONE_SESSION_TAG
else:
if default_tag not in access_rules_tags:
session_tag_choices.insert(0, (default_tag, default_tag))
# }}}
def find_sessions() -> list[FlowSession]:
return list(FlowSession.objects
.filter(
participation=participation,
flow_id=flow_id)
.order_by("start_time"))
exception_form = None
request = pctx.request
if request.method == "POST":
exception_form = ExceptionStage2Form(find_sessions(), request.POST)
create_session_form = CreateSessionForm(
session_tag_choices, default_tag, create_session_is_override,
request.POST)
if "create_session" in request.POST or "next" in request.POST:
pass
else:
raise SuspiciousOperation(_("invalid command"))
if create_session_form.is_valid() and "create_session" in request.POST:
from course.flow import start_flow
access_rules_tag = (
create_session_form.cleaned_data[
"access_rules_tag_for_new_session"])
if access_rules_tag == NONE_SESSION_TAG:
access_rules_tag = None
new_session = start_flow(pctx.repo, pctx.course, participation,
user=participation.user,
flow_id=flow_id,
flow_desc=flow_desc,
session_start_rule=session_start_rule,
now_datetime=now_datetime)
if access_rules_tag is not None:
new_session.access_rules_tag = access_rules_tag
new_session.save()
exception_form = None
messages.add_message(
pctx.request, messages.SUCCESS,
_("A new session%(tag)s was created for '%(participation)s' "
"for '%(flow_id)s'.")
% {
"tag":
_(" tagged '%s'") % access_rules_tag
if access_rules_tag is not None else "",
"participation": participation,
"flow_id": flow_id})
elif exception_form.is_valid() and "next" in request.POST: # type: ignore
return redirect(
"relate-grant_exception_stage_3",
pctx.course.identifier,
participation.id,
flow_id,
exception_form.cleaned_data["session"]) # type: ignore
else:
create_session_form = CreateSessionForm(
session_tag_choices, default_tag, create_session_is_override)
if exception_form is None:
exception_form = ExceptionStage2Form(find_sessions())
return render_course_page(pctx, "course/generic-course-form.html", {
"forms": [exception_form, create_session_form],
"form_text": form_text,
"form_description": _("Grant Exception"),
})
class ExceptionStage3Form(StyledForm):
def __init__(
self,
default_data: dict,
flow_desc: FlowDesc,
base_session_tag: str,
*args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
rules = getattr(flow_desc, "rules", object())
tags = getattr(rules, "tags", [])
layout = []
if tags:
tags = [NONE_SESSION_TAG] + tags
if base_session_tag is not None and base_session_tag not in tags:
tags.append(base_session_tag)
self.fields["set_access_rules_tag"] = forms.ChoiceField(
choices=[(tag, tag) for tag in tags],
initial=(base_session_tag
if base_session_tag is not None
else NONE_SESSION_TAG),
label=_("Set access rules tag"))
self.fields["restrict_to_same_tag"] = forms.BooleanField(
label=_("Exception only applies to sessions "
"with the above tag"),
required=False,
initial=default_data.get("restrict_to_same_tag", True))
layout.append(
Div("set_access_rules_tag", "restrict_to_same_tag",
css_class="well"))
access_fields = ["create_access_exception", "access_expires"]
self.fields["create_access_exception"] = forms.BooleanField(
required=False, help_text=_("If set, an exception for the "
"access rules will be created."), initial=True,
label=_("Create access rule exception"))
self.fields["access_expires"] = forms.DateTimeField(
widget=DateTimePickerInput(
options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True,
"showClear": True}),
required=False,
label=pgettext_lazy("Time when access expires", "Access expires"),
help_text=_("At the specified time, the special access granted below "
"will expire "
"and revert to being the same as for the rest of the class. "
"This field may "
"be empty, in which case this access does not expire. Note also that "
"the grading-related entries (such as 'due date' and 'credit percent') "
"do not expire and remain valid indefinitely, unless overridden by "
"another exception."))
for key, name in FLOW_PERMISSION_CHOICES:
self.fields[key] = forms.BooleanField(label=name, required=False,
initial=default_data.get(key) or False)
access_fields.append(key)
layout.append(Div(*access_fields, css_class="well"))
self.fields["create_grading_exception"] = forms.BooleanField(
required=False, help_text=_("If set, an exception for the "
"grading rules will be created."), initial=True,
label=_("Create grading rule exception"))
self.fields["due_same_as_access_expiration"] = forms.BooleanField(
required=False, help_text=_("If set, the 'Due time' field will be "
"disregarded."),
initial=default_data.get("due_same_as_access_expiration") or False,
label=_("Due same as access expiration"))
self.fields["due"] = forms.DateTimeField(
widget=DateTimePickerInput(
options={"format": "YYYY-MM-DD HH:mm", "sideBySide": True}),
required=False,
help_text=_("The due time shown to the student. Also, the "
"time after which "
"any session under these rules is subject to expiration."),
initial=default_data.get("due"),
label=_("Due time"))
self.fields["generates_grade"] = forms.BooleanField(required=False,
initial=default_data.get("generates_grade", True),
label=_("Generates grade"))
self.fields["credit_percent"] = forms.FloatField(required=False,
initial=default_data.get("credit_percent"),
label=_("Credit percent"))
self.fields["bonus_points"] = forms.FloatField(required=False,
initial=default_data.get("bonus_points"),
label=_("Bonus points"))
self.fields["max_points"] = forms.FloatField(required=False,
initial=default_data.get("max_points"),
label=_("Maximum number of points (for percentage)"))
self.fields["max_points_enforced_cap"] = forms.FloatField(required=False,
initial=default_data.get("max_points_enforced_cap"),
label=_("Maximum number of points (enforced cap)"))
layout.append(Div("create_grading_exception",
"due_same_as_access_expiration", "due",
"generates_grade",
"credit_percent", "bonus_points", "max_points",
"max_points_enforced_cap",
css_class="well"))
self.fields["comment"] = forms.CharField(
widget=forms.Textarea, required=True,
initial=default_data.get("comment"),
label=_("Comment"))
layout.append("comment")
self.helper.add_input(
Submit(
"save", _("Save")))
self.helper.layout = Layout(*layout)
def clean(self):
access_expires = self.cleaned_data.get("access_expires")
due_same_as_access_expiration = self.cleaned_data.get(
"due_same_as_access_expiration")
if (not access_expires and due_same_as_access_expiration):
self.add_error(
"access_expires",
_("Must specify access expiration if 'due same "
"as access expiration' is set."))
@course_view
@transaction.atomic
def grant_exception_stage_3(
pctx: CoursePageContext,
participation_id: int,
flow_id: str,
session_id: int) -> http.HttpResponse:
if not pctx.has_permission(pperm.grant_exception):
raise PermissionDenied(_("may not grant exceptions"))
participation = get_object_or_404(Participation, id=participation_id)
from course.content import get_flow_desc
try:
flow_desc = get_flow_desc(pctx.repo, pctx.course, flow_id,
pctx.course_commit_sha)
except ObjectDoesNotExist:
raise http.Http404()
session = FlowSession.objects.get(id=int(session_id))
now_datetime = get_now_or_fake_time(pctx.request)
from course.utils import (
get_session_access_rule,
get_session_grading_rule)
access_rule = get_session_access_rule(session, flow_desc, now_datetime)
grading_rule = get_session_grading_rule(session, flow_desc, now_datetime)
request = pctx.request
if request.method == "POST":
form = ExceptionStage3Form(
{}, flow_desc, session.access_rules_tag, request.POST)
if form.is_valid():
permissions = [
key
for key, __ in FLOW_PERMISSION_CHOICES
if form.cleaned_data[key]]
from course.validation import (
validate_session_access_rule,
validate_session_grading_rule,
ValidationContext)
from relate.utils import dict_to_struct
vctx = ValidationContext(
repo=pctx.repo,
commit_sha=pctx.course_commit_sha)
flow_desc = get_flow_desc(pctx.repo,
pctx.course,
flow_id, pctx.course_commit_sha)
tags: list[str] = []
if hasattr(flow_desc, "rules"):
tags = cast(List[str], getattr(flow_desc.rules, "tags", []))
exceptions_created = []
restricted_to_same_tag = bool(
form.cleaned_data.get("restrict_to_same_tag")
and session.access_rules_tag is not None)
# {{{ put together access rule
if form.cleaned_data["create_access_exception"]:
new_access_rule = {"permissions": permissions}
if restricted_to_same_tag:
new_access_rule["if_has_tag"] = session.access_rules_tag
validate_session_access_rule(
vctx, _("newly created exception"),
dict_to_struct(new_access_rule), tags)
fre_access = FlowRuleException(
flow_id=flow_id,
participation=participation,
expiration=form.cleaned_data["access_expires"],
creator=pctx.request.user,
comment=form.cleaned_data["comment"],
kind=flow_rule_kind.access,
rule=new_access_rule)
fre_access.save()
exceptions_created.append(
dict(FLOW_RULE_KIND_CHOICES)[fre_access.kind])
# }}}
session_access_rules_tag_changed = False
if not restricted_to_same_tag:
new_access_rules_tag = form.cleaned_data.get("set_access_rules_tag")
if new_access_rules_tag == NONE_SESSION_TAG:
new_access_rules_tag = None
if session.access_rules_tag != new_access_rules_tag:
session.access_rules_tag = new_access_rules_tag
session.save()
session_access_rules_tag_changed = True
if new_access_rules_tag is not None:
msg = _("Access rules tag of the selected session "
"updated to '%s'.") % new_access_rules_tag
else:
msg = _(
"Removed access rules tag of the selected session.")
messages.add_message(pctx.request, messages.SUCCESS, msg)
# {{{ put together grading rule
if form.cleaned_data["create_grading_exception"]:
due = form.cleaned_data["due"]
if form.cleaned_data["due_same_as_access_expiration"]:
due = form.cleaned_data["access_expires"]
descr = gettext("Granted exception")
if form.cleaned_data["credit_percent"] is not None:
descr += string_concat(" (%.1f%% ", gettext("credit"), ")") \
% form.cleaned_data["credit_percent"]
due_local_naive = due
if due_local_naive is not None:
from relate.utils import as_local_time
due_local_naive = (
as_local_time(due_local_naive)
.replace(tzinfo=None))
new_grading_rule = {
"description": descr,
}
if due_local_naive is not None:
new_grading_rule["due"] = due_local_naive
for attr_name in ["credit_percent", "bonus_points",
"max_points", "max_points_enforced_cap", "generates_grade"]:
if form.cleaned_data[attr_name] is not None:
new_grading_rule[attr_name] = form.cleaned_data[attr_name]
if restricted_to_same_tag:
new_grading_rule["if_has_tag"] = session.access_rules_tag
validate_session_grading_rule(
vctx, _("newly created exception"),
dict_to_struct(new_grading_rule), tags,
grading_rule.grade_identifier)
fre_grading = FlowRuleException(
flow_id=flow_id,
participation=participation,
creator=pctx.request.user,
comment=form.cleaned_data["comment"],
kind=flow_rule_kind.grading,
rule=new_grading_rule)
fre_grading.save()
exceptions_created.append(
dict(FLOW_RULE_KIND_CHOICES)[fre_grading.kind])
# }}}
if exceptions_created:
for exc in exceptions_created:
messages.add_message(pctx.request, messages.SUCCESS,
_(
"'%(exception_type)s' exception granted to "
"'%(participation)s' for '%(flow_id)s'.")
% {
"exception_type": exc,
"participation": participation,
"flow_id": flow_id})
else:
if session_access_rules_tag_changed:
messages.add_message(
pctx.request, messages.WARNING,
_(
"No other exception granted to the given flow "
"session of '%(participation)s' "
"for '%(flow_id)s'.")
% {
"participation": participation,
"flow_id": flow_id})
else:
messages.add_message(pctx.request, messages.WARNING,
_(
"No exception granted to the given flow "
"session of '%(participation)s' "
"for '%(flow_id)s'.")
% {
"participation": participation,
"flow_id": flow_id})
return redirect(
"relate-grant_exception",
pctx.course.identifier)
else:
data = {
"restrict_to_same_tag": session.access_rules_tag is not None,
#"due_same_as_access_expiration": True,
"due": grading_rule.due,
"generates_grade": grading_rule.generates_grade,
"credit_percent": grading_rule.credit_percent,
"bonus_points": grading_rule.bonus_points,
"max_points": grading_rule.max_points,
"max_points_enforced_cap": grading_rule.max_points_enforced_cap,
}
for perm in access_rule.permissions:
data[perm] = True
form = ExceptionStage3Form(data, flow_desc, session.access_rules_tag)
return render_course_page(pctx, "course/generic-course-form.html", {
"form": form,
"form_description": _("Grant Exception"),
"form_text": string_concat(
"<div class='well'>",
_("Granting exception to '%(participation)s' "
"for '%(flow_id)s' (session %(session)s)."),
"</div>")
% {
"participation": participation,
"flow_id": flow_id,
"session": strify_session_for_exception(session)},
})
# }}}
# {{{ ssh keypair
@login_required
def generate_ssh_keypair(request):
if not request.user.is_staff:
raise PermissionDenied(_("only staff may use this tool"))
from paramiko import RSAKey
key_class = RSAKey
prv = key_class.generate(bits=2048)
import io
prv_bio = io.StringIO()
prv.write_private_key(prv_bio)
prv_bio_read = io.StringIO(prv_bio.getvalue())
pub = key_class.from_private_key(prv_bio_read)
pub_bio = io.StringIO()
pub_bio.write(f"{pub.get_name()} {pub.get_base64()} relate-course-key")
return render(request, "course/keypair.html", {
"public_key": prv_bio.getvalue(),
"private_key": pub_bio.getvalue(),
})
# }}}
# {{{ celery task monitoring
@login_required
def monitor_task(request, task_id):
from celery.result import AsyncResult
from celery import states
async_res = AsyncResult(task_id)
progress_percent = None
progress_statement = None
if async_res.state == "PROGRESS":
meta = async_res.info
current = meta["current"]
total = meta["total"]
if total > 0:
progress_percent = 100 * (current / total)
progress_statement = (
_("%(current)d out of %(total)d items processed.")
% {"current": current, "total": total})
if async_res.state == states.SUCCESS:
if (isinstance(async_res.result, dict)
and "message" in async_res.result):
progress_statement = async_res.result["message"]
traceback = None
if request.user.is_staff and async_res.state == states.FAILURE:
traceback = async_res.traceback
return render(request, "course/task-monitor.html", {
"state": async_res.state,
"progress_percent": progress_percent,
"progress_statement": progress_statement,
"traceback": traceback,
})
# }}}
# {{{ edit course
class EditCourseForm(StyledModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["identifier"].disabled = True
self.fields["active_git_commit_sha"].disabled = True
self.helper.add_input(
Submit("submit", _("Update")))
class Meta:
model = Course
exclude = (
"participants",
"trusted_for_markup",
)
widgets = {
"start_date": DateTimePickerInput(options={"format": "YYYY-MM-DD"}),
"end_date": DateTimePickerInput(options={"format": "YYYY-MM-DD"}),
"force_lang": forms.Select(
choices=get_course_specific_language_choices()),
}
@course_view
def edit_course(pctx):
if not pctx.has_permission(pperm.edit_course):
raise PermissionDenied()
request = pctx.request
instance = pctx.course
if request.method == "POST":
form = EditCourseForm(request.POST, instance=pctx.course)
if form.is_valid():
if form.has_changed():
instance = form.save()
messages.add_message(
request, messages.SUCCESS,
_("Successfully updated course settings."))
else:
messages.add_message(
request, messages.INFO,
_("No change was made on the settings."))
else:
messages.add_message(request, messages.ERROR,
_("Failed to update course settings."))
form = EditCourseForm(instance=instance)
# Render the page with course.force_lang, in case force_lang was updated
from course.utils import LanguageOverride
with LanguageOverride(instance):
return render_course_page(pctx, "course/generic-course-form.html", {
"form_description": _("Edit Course"),
"form": form
})
# }}}
# vim: foldmethod=marker
| 35.588356 | 85 | 0.589349 |
aced42a4ff3df93186eb448949c202e27a30e8b7 | 10,236 | py | Python | godefroid/policies.py | AbdeNacerHamidi/microx | d60e7aee6096d012594d2eb59dfaed7612bd232c | ["Apache-2.0"] | 2 | 2021-08-07T16:54:57.000Z | 2021-11-17T10:58:36.000Z | godefroid/policies.py | awesie/microx | 490cdc4a3b9f023ee058142b58051e32bc2bbd9c | ["Apache-2.0"] | null | null | null | godefroid/policies.py | awesie/microx | 490cdc4a3b9f023ee058142b58051e32bc2bbd9c | ["Apache-2.0"] | null | null | null |
import sys
from enum import Enum
import secrets
import collections
class DefaultMemoryPolicy:
def __init__(self):
pass
def handle_store(self, addr):
# Does this policy care about stores?
return True
def handle_load(self, addr):
# Does this policy care about loads?
return False
def read_before_write(self, addr, size, data):
# sys.stdout.write("Read before write of {:x} - {:x}\n".format(addr, addr + size))
return data
def write_before_read(self, addr, size, data):
# sys.stdout.write("Write before read of {:x} - {:x}\n".format(addr, addr + size))
return data
class InputType(Enum):
DATA = 0
POINTER = 1
COMPUTED = 2
class InputMemoryPolicy:
# TODO(artem): Make this a configurable value or based on address size
POINTER_INCREMENT = int(0x1000 / 4)
def __deepcopy__(self, memo):
# Just create a new one. We do not care about
# copying access ranges for what we're doing
cp = InputMemoryPolicy(self._address_size*8,
(self._start, self._end),
(self._pointers_start, self._pointers_end))
return cp
def __init__(self, address_size, argument_vas, pointer_vas):
self._address_size = int(address_size / 8)
self._known_inputs = {}
self._known_outputs = {}
# Where initial arguments will be found (i.e., the stack)
self._start = argument_vas[0]
self._end = argument_vas[1]
assert self._start < self._end
# Things that look like pointers all point *to* this range
self._pointers_start = pointer_vas[0]
self._pointers_end = pointer_vas[1]
assert self._pointers_start < self._pointers_end
self._pointer_watermark = self._pointers_start
# maps address (such as stack) -> where it points to (in "heap")
self._pointer_map = {}
def add_output(self, addr, size):
sys.stdout.write(
f"!!! Manually adding output at {addr:08x} - {addr+size:08x}\n"
)
self._known_outputs[addr] = size
def pointer_to_bytes(self, ptr):
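        # e.g. with a 32-bit policy (_address_size == 4):
        #   0xdeadbeef -> b'\xef\xbe\xad\xde' (little-endian)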
return int(ptr).to_bytes(self._address_size, byteorder="little")
def generate_pointer(self):
# start at the current "high water mark" for input pointers
new_ptr = self._pointer_watermark
assert (new_ptr + self._address_size) < self._pointers_end
# move watermark to next area, further down from here
self._pointer_watermark += InputMemoryPolicy.POINTER_INCREMENT
# TODO(artem): Handle the case where we run out of pointer space :)
assert self._pointer_watermark < self._pointers_end
sys.stdout.write(
"Generating a pointer going to {:08x} in pointer space\n".format(new_ptr)
)
return new_ptr
def generate_random(self, size):
        # NOTE(artem): Consider a seeded random for reproducibility
return secrets.token_bytes(size)
def handle_store(self, addr):
if (self._start <= addr <= self._end) or (
self._pointers_start <= addr <= self._pointers_end
):
# Does this policy care about stores?
return True
else:
# This address is outside policy bounds
return False
def handle_load(self, addr):
if (self._start <= addr <= self._end) or (
self._pointers_start <= addr <= self._pointers_end
):
# Does this policy care about loads?
return True
else:
# This address is outside policy bounds
return False
def read_before_write(self, addr, size, data):
sys.stdout.write(f"Read-before-write of {size} bytes\n")
sys.stdout.write(f" at {addr:08x} [{addr:08x} - {addr+size:08x}]\n")
new_data = data
# TODO(artem): Check if this address+size has been previously read
if self._address_size == size:
# when reading a pointer size, at first, always assume the value is a pointer
# and generate a pointer into pointer space aka ("heap")
ptr = self.generate_pointer()
self._pointer_map[addr] = ptr
new_data = self.pointer_to_bytes(ptr)
else:
# When reading a non-pointer size, return a random value
new_data = self.generate_random(size)
# Mark the memory cell containing this value as used
self._known_inputs[addr] = size
assert len(data) == len(new_data)
return new_data
def write_before_read(self, addr, size, data):
sys.stdout.write(f"Write-before-read of {size} bytes")
sys.stdout.write(f" at {addr:08x} [{addr:08x} - {addr+size:08x}]\n")
self._known_outputs[addr] = size
return data
def _make_itype(self, addr, size):
ptr = self.get_pointer(addr)
if ptr is not None:
return (size, InputType.POINTER, ptr)
elif size != 0:
# TODO(artem): Keep track of initial values returned
return (size, InputType.DATA, 0)
elif size == 0:
return (size, InputType.COMPUTED, 0)
def get_outputs(self):
# a copy of get_inputs that doesn't care about
# the kind of output, at least for now
output_addrs = sorted(self._known_outputs.keys())
merged_addrs = collections.OrderedDict()
# no outputs = blank dict
if 0 == len(output_addrs):
return merged_addrs
# process the base case of the first input
entry = output_addrs[0]
merged_addrs[entry] = self._known_outputs[entry]
watermark = entry + self._known_outputs[entry]
# start merging overlapping input areas
for addr in output_addrs[1:]:
write_size = self._known_outputs[addr]
if addr >= watermark:
# Next output address is greater than addr+size of previous
# This means a new output "area" was found
merged_addrs[addr] = write_size
watermark = addr + write_size
entry = addr
else:
# This output address at least partially overlaps
# the previous output address. Merge them
if (addr + write_size) > watermark:
new_watermark = addr + write_size
merged_addrs[entry] = new_watermark - entry
watermark = new_watermark
# entry not updated since we extended the area
else:
# This entry is entirely subsumed by the previous output area
pass
return merged_addrs
def get_inputs(self):
# loop through inputs. Get ranges/bytes
input_addrs = sorted(self._known_inputs.keys())
# return an ordered dict of
# address : size of input area
merged_addrs = collections.OrderedDict()
# no inputs = blank dict
if 0 == len(input_addrs):
return merged_addrs
# process the base case of the first input
entry = input_addrs[0]
merged_addrs[entry] = self._make_itype(entry, self._known_inputs[entry])
watermark = entry + self._known_inputs[entry]
# start merging overlapping input areas
for addr in input_addrs[1:]:
read_size = self._known_inputs[addr]
if addr >= watermark:
# Next input address is greater than addr+size of previous
# This means a new input "area" was found
merged_addrs[addr] = self._make_itype(addr, read_size)
watermark = addr + read_size
entry = addr
else:
# This input address at least partially overlaps
# the previous input address. Merge them
if (addr + read_size) > watermark:
new_watermark = addr + read_size
merged_addrs[entry] = self._make_itype(addr, new_watermark - entry)
watermark = new_watermark
# entry not updated since we extended the area
else:
# This entry is entirely subsumed by the previous input area
pass
return merged_addrs
def get_pointer(self, addr):
# Return address it points to, or None if not a pointer
return self._pointer_map.get(addr, None)
def in_input_range(self, addr):
# is it a known input?
if addr in self._known_inputs:
return True
# is it a pointer to the input heap?
if self._pointers_start <= addr < self._pointers_end:
if addr > self._pointer_watermark:
self._pointer_watermark += InputMemoryPolicy.POINTER_INCREMENT
assert self._pointer_watermark < self._pointers_end
return True
# is it on the stack?
if self._start <= addr < self._end:
return True
        # It's probably not an input
return False
def handle_compute(self, result, base, scale, index, disp, size, hint):
parts = (base, scale, index, disp)
if self.in_input_range(result):
# Computed address is an input range, mark it as input
self._known_inputs[result] = size
else:
# TODO(artem): This code may not be necessary
# Is the address computed from an input address?
for p in parts:
# NOTE(artem): the check for input address zero is here purely for sanity checking
if p in self._known_inputs and p != 0:
sys.stdout.write(
f"Input Address: {p:08x} used to compute {result:08x}\n"
)
sys.stdout.write(f"\tAdding {result:08x} to inputs")
# Add a new 'computed' input address
self._known_inputs[result] = size
sys.stdout.write(
"!!! Failed in_input_range but computed from known input\n"
)
break
return result | 35.541667 | 98 | 0.590758 |
aced44023d73e9dce6b68c61f7c424c2e982bbda | 6,975 | py | Python | deen/plugins/formatters/plugin_highlight.py | nkrios/deen | c4997f13af501eb129b75ff7632d59c4d4608963 | [
"Apache-2.0"
] | 47 | 2016-11-09T19:32:14.000Z | 2022-01-19T03:03:09.000Z | deen/plugins/formatters/plugin_highlight.py | Warlockk/deen | d4122083576305cbcc7ffe4cf998993d2337a75c | [
"Apache-2.0"
] | 38 | 2016-10-20T08:19:46.000Z | 2020-10-06T13:38:12.000Z | deen/plugins/formatters/plugin_highlight.py | Warlockk/deen | d4122083576305cbcc7ffe4cf998993d2337a75c | [
"Apache-2.0"
] | 9 | 2017-10-03T09:31:53.000Z | 2021-02-14T02:00:13.000Z | """The GUI part is not properly implemented yet.
The GUI is currently not able to set HTML-formatted
content that will be handled correctly in subsequent
encoder widgets. Adding HTML-formatted content
will currently alter the actual data, which will
influence all subsequent encoder widget results."""
from __future__ import absolute_import
import sys
try:
import pygments
import pygments.lexers
import pygments.formatters
import pygments.styles
PYGMENTS = True
except ImportError:
PYGMENTS = False
from PyQt5.QtWidgets import QDialog
from deen.exceptions import *
from .. import DeenPlugin
from deen.gui.widgets.ui_deenpluginsyntaxhighlighter import Ui_SyntaxHighlighterGui
class DeenPluginSyntaxHighlighter(DeenPlugin):
name = 'syntax_highlighter'
display_name = 'Syntax Highlight (f)'
aliases = ['highlight',
'syntax']
cmd_name = 'syntax-highlight'
    cmd_help = 'Apply syntax highlighting to data'
formatted = True
def __init__(self):
super(DeenPluginSyntaxHighlighter, self).__init__()
self.parent = None
self.highlightergui = None
def prerequisites(self):
try:
import pygments
except ImportError:
self.log_missing_depdendencies('pygments')
return False
else:
return True
def process(self, data, lexer=None, formatter=None):
super(DeenPluginSyntaxHighlighter, self).process(data)
if not lexer:
lexer = pygments.lexers.TextLexer()
if not formatter:
formatter = pygments.formatters.NullFormatter()
data = pygments.highlight(data, lexer, formatter)
if not isinstance(data, (bytes, bytearray)):
data = data.encode()
return data
@staticmethod
def add_argparser(argparser, *args, **kwargs):
# Python 2 argparse does not support aliases
if sys.version_info.major < 3 or \
(sys.version_info.major == 3 and
sys.version_info.minor < 2):
parser = argparser.add_parser(DeenPluginSyntaxHighlighter.cmd_name,
help=DeenPluginSyntaxHighlighter.cmd_help)
else:
parser = argparser.add_parser(DeenPluginSyntaxHighlighter.cmd_name,
help=DeenPluginSyntaxHighlighter.cmd_help,
aliases=DeenPluginSyntaxHighlighter.aliases)
parser.add_argument('plugindata', action='store', help='input data', nargs='?')
parser.add_argument('--list', action='store_true', dest='list',
default=False, help='list available lexers')
parser.add_argument('--list-formatters', action='store_true', dest='listformatters',
default=False, help='list available formatters')
parser.add_argument('-f', '--file', dest='plugininfile', default=None,
help='file name or - for STDIN', metavar='filename')
parser.add_argument('--formatter', help='formatter to use',
type=str.lower, default=None, metavar='formatter')
        parser.add_argument('-l', '--lexer', help='lexer to use', default=None,
type=str.lower, metavar='lexer')
parser.add_argument('-n', '--numbers', action='store_true', dest='numbers',
default=False, help='print line numbers')
def process_cli(self, args):
if not PYGMENTS:
self.error = MissingDependencyException('pygments is not available')
return
if not self.content:
if not args.plugindata:
if not args.plugininfile:
self.content = self.read_content_from_file('-')
else:
self.content = self.read_content_from_file(args.plugininfile)
else:
self.content = args.plugindata
if not self.content:
return
style = pygments.styles.get_style_by_name('colorful')
if args.lexer:
lexer = pygments.lexers.get_lexer_by_name(args.lexer)
else:
lexer = pygments.lexers.guess_lexer(self.content.decode())
        if args.formatter:
            formatter = pygments.formatters.get_formatter_by_name(args.formatter)
        else:
            self.log.info('Guessing formatter')
            import curses
            curses.setupterm()
if curses.tigetnum('colors') >= 256:
formatter = pygments.formatters.Terminal256Formatter(style=style, linenos=args.numbers)
else:
formatter = pygments.formatters.TerminalFormatter(linenos=args.numbers)
return self.process(self.content, lexer=lexer, formatter=formatter)
def process_gui(self, parent, content):
self.parent = parent
self.highlightergui = SyntaxHighlighterGui(self.parent)
for lexer in pygments.lexers.get_all_lexers():
self.highlightergui.ui.lexer_combo.addItem(lexer[0])
for formatter in pygments.formatters.get_all_formatters():
self.highlightergui.ui.formatter_combo.addItem(formatter.name)
if self.highlightergui.exec_() == 0:
# If the plugin GUI is cancelled, just
# return without doing anything.
return
lexer = self.highlightergui.ui.lexer_combo.currentText()
for l in pygments.lexers.get_all_lexers():
if lexer == l[0]:
try:
lexer = l[1][0]
except Exception:
continue
else:
break
else:
self.log.error('Could not find lexer alias for ' + str(lexer))
return
lexer = pygments.lexers.get_lexer_by_name(lexer)
if not lexer:
self.log.error('Lexer not found: ' + str(lexer))
return
formatter = self.highlightergui.ui.formatter_combo.currentText()
for f in pygments.formatters.get_all_formatters():
if formatter == f.name:
formatter = f.aliases[0]
break
else:
self.log.error('Could not find formatter alias for ' + str(formatter))
return
if formatter == 'html':
formatter = pygments.formatters.get_formatter_by_name(formatter, full=True)
else:
formatter = pygments.formatters.get_formatter_by_name(formatter)
if not formatter:
self.log.error('Formatter not found: ' + str(formatter))
return
content = self.process(content, lexer=lexer, formatter=formatter)
return content
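# Minimal standalone sketch (not part of deen): the same pygments call chain
# that process() uses, with an explicit lexer/formatter pair instead of the
# plugin's guessing logic. Assumes only that pygments is installed.
def _demo_highlight(code="print('hi')"):
    lexer = pygments.lexers.get_lexer_by_name('python')
    formatter = pygments.formatters.TerminalFormatter()
    return pygments.highlight(code, lexer, formatter)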
class SyntaxHighlighterGui(QDialog):
def __init__(self, parent):
super(SyntaxHighlighterGui, self).__init__(parent)
self.ui = Ui_SyntaxHighlighterGui()
self.ui.setupUi(self)
self.parent = parent
| 41.272189 | 103 | 0.614624 |
aced443c774d31be3cfc253e153891c3d996d40c | 312 | py | Python | apps/grupo/urls.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | apps/grupo/urls.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | apps/grupo/urls.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import GrupoListar, GrupoCreate, GrupoDelete
urlpatterns = [
path('listar/', GrupoListar.as_view(), name="grupo_listar"),
path('crear/', GrupoCreate.as_view(), name="grupo_crear"),
path('eliminar/<str:id>', GrupoDelete.as_view(), name="grupo_eliminar"),
]
| 28.363636 | 76 | 0.711538 |
aced44597f129909093aa9f424af7cace5483cbc | 2,301 | py | Python | tests.py | victorlomi/pitch | 9e3d28b9eff5230b48e92d32f5bd635644ffc5d7 | [
"Unlicense"
] | null | null | null | tests.py | victorlomi/pitch | 9e3d28b9eff5230b48e92d32f5bd635644ffc5d7 | [
"Unlicense"
] | null | null | null | tests.py | victorlomi/pitch | 9e3d28b9eff5230b48e92d32f5bd635644ffc5d7 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
from datetime import datetime, timedelta
import unittest
from app import create_app, db
from app.models import User, Comment, Pitch
from config import Config
class TestConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
class UserModelCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_hashing(self):
u = User(username='bella')
u.set_password('cat')
self.assertFalse(u.check_password('dog'))
self.assertTrue(u.check_password('cat'))
def test_pitches(self):
u1 = User(username='john')
p = Pitch(body="example", author=u1)
db.session.add(u1)
db.session.add(p)
db.session.commit()
self.assertEqual(Pitch.query.filter_by(author=u1).first().body, p.body)
class CommentModelCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_comments(self):
c = Comment(body="comment")
db.session.add(c)
db.session.commit()
self.assertEqual(Comment.query.filter_by(body=c.body).first().body, c.body)
class PitchModelCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_comments(self):
p = Pitch(body="example")
c = Comment(body="comment", pitch=p)
db.session.add(p)
db.session.add(c)
db.session.commit()
self.assertEqual(Comment.query.filter_by(pitch=p).first().body, c.body)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 29.5 | 84 | 0.611908 |
aced44888753e4986944ff0c403d92bfd5f11060 | 4,624 | py | Python | mythx_cli/formatter/simple_stdout.py | ConsenSys/dili-faas-cli | 7b58864f0d2ae200408cf214510d33f8bd52a81d | [
"MIT"
] | null | null | null | mythx_cli/formatter/simple_stdout.py | ConsenSys/dili-faas-cli | 7b58864f0d2ae200408cf214510d33f8bd52a81d | [
"MIT"
] | null | null | null | mythx_cli/formatter/simple_stdout.py | ConsenSys/dili-faas-cli | 7b58864f0d2ae200408cf214510d33f8bd52a81d | [
"MIT"
] | null | null | null | """This module contains a simple text formatter class printing a subset of the
response data."""
from typing import List, Optional, Tuple
from mythx_models.response import (
AnalysisInputResponse,
AnalysisListResponse,
AnalysisStatusResponse,
DetectedIssuesResponse,
GroupListResponse,
GroupStatusResponse,
ProjectListResponse,
VersionResponse,
)
from mythx_cli.formatter.base import BaseFormatter
from mythx_cli.util import index_by_filename
class SimpleFormatter(BaseFormatter):
"""The simple text formatter.
This formatter generates simplified text output. It also displays
the source locations of issues by line in the Solidity source code
if given. Therefore, this formatter requires the analysis input to
be given.
"""
report_requires_input = True
@staticmethod
def format_analysis_list(resp: AnalysisListResponse) -> str:
"""Format an analysis list response to a simple text representation."""
res = []
for analysis in resp.analyses:
res.append("UUID: {}".format(analysis.uuid))
res.append("Submitted at: {}".format(analysis.submitted_at))
res.append("Status: {}".format(analysis.status))
res.append("")
return "\n".join(res)
@staticmethod
def format_group_status(resp: GroupStatusResponse) -> str:
"""Format a group status response to a simple text representation."""
res = [
"ID: {}".format(resp.identifier),
"Name: {}".format(resp.name or "<unnamed>"),
"Created on: {}".format(resp.created_at),
"Status: {}".format(resp.status),
"",
]
return "\n".join(res)
@staticmethod
def format_group_list(resp: GroupListResponse) -> str:
"""Format an analysis group response to a simple text
representation."""
res = []
for group in resp.groups:
res.append("ID: {}".format(group.identifier))
res.append("Name: {}".format(group.name or "<unnamed>"))
res.append("Created on: {}".format(group.created_at))
res.append("Status: {}".format(group.status))
res.append("")
return "\n".join(res)
@staticmethod
def format_project_list(resp: ProjectListResponse) -> str:
"""Format an analysis group response to a simple text
representation."""
res = []
for project in resp.projects:
res.append("ID: {}".format(project.id))
res.append("Name: {}".format(project.name or "<unnamed>"))
res.append("Created on: {}".format(project.created))
res.append("Modified: {}".format(project.modified))
res.append("")
return "\n".join(res)
@staticmethod
def format_analysis_status(resp: AnalysisStatusResponse) -> str:
"""Format an analysis status response to a simple text
representation."""
res = [
"UUID: {}".format(resp.uuid),
"Submitted at: {}".format(resp.submitted_at),
"Status: {}".format(resp.status),
"",
]
return "\n".join(res)
@staticmethod
def format_detected_issues(
issues_list: List[
Tuple[str, DetectedIssuesResponse, Optional[AnalysisInputResponse]]
],
**kwargs,
) -> str:
"""Format an issue report to a simple text representation."""
file_to_issues = index_by_filename(issues_list)
result = []
for filename, data in file_to_issues.items():
result.append(f"Report for {filename}")
# sort by line number
data = sorted([o for o in data if o["issues"]], key=lambda x: x["line"])
for line in data:
for issue in line["issues"]:
result.append(f"Title: {issue['swcTitle']} ({issue['severity']})")
result.append(f"Description: {issue['description']['head']}")
result.append(f"Line: {line['line']}")
result.append("\t" + line["content"].strip() + "\n")
return "\n".join(result)
@staticmethod
def format_version(resp: VersionResponse) -> str:
"""Format a version response to a simple text representation."""
return "\n".join(
[
"API: {}".format(resp.api),
"Harvey: {}".format(resp.harvey),
"Maru: {}".format(resp.maru),
"Mythril: {}".format(resp.mythril),
"Hashed: {}".format(resp.hash),
]
)
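# Illustrative sketch (not part of mythx-cli): the formatter only reads
# attributes off the response objects, so a duck-typed stub is enough to
# preview the output. The _Stub class below is hypothetical scaffolding.
if __name__ == "__main__":
    class _Stub:
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    demo = _Stub(
        analyses=[_Stub(uuid="ab12", submitted_at="2020-01-01", status="Finished")]
    )
    print(SimpleFormatter.format_analysis_list(demo))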
| 33.266187 | 86 | 0.584775 |
aced450a57fa8c2d0f11502ad5c1b00a881a0032 | 4,812 | py | Python | final/170401021.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 2 | 2020-05-20T19:25:37.000Z | 2021-04-01T21:26:54.000Z | final/170401021.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 15 | 2020-05-18T14:53:18.000Z | 2020-06-26T09:20:50.000Z | final/170401021.py | yigitcanustek/blm2010 | 2e86dab3fc225a7679b6c660fb01902423476a94 | [
"Unlicense"
] | 155 | 2020-04-28T16:14:38.000Z | 2020-06-26T09:46:59.000Z | import math
import sympy as syp
def oku():
dosya = open("veriler.txt")
y = dosya.readlines()
x=[]
for i in range(len(y)):
y[i]=int(y[i])
x.append(i)
return x,y
def matris_olustur(x, y, n, m):
matris = []
for i in range(m + 1):
satir = []
for j in range(m + 1):
if (i == 0 and j == 0):
satir.append(n)
else:
x_toplam = 0
for x_eleman in x:
x_toplam += x_eleman ** (i + j)
satir.append(x_toplam)
sum_ = 0
for eleman in range(n):
sum_ += (x[eleman] ** i) * y[eleman]
satir.append(sum_)
matris.append(satir)
return matris
def gausselimination(matris): # Gauss
boyut = len(matris)
for i in range(0, boyut):
maxSutun = abs(matris[i][i])
maxSatir = i
for j in range(i + 1, boyut):
if abs(matris[j][i]) > maxSutun:
maxSutun = abs(matris[j][i])
maxSatir = j
for k in range(i, boyut + 1):
temp = matris[maxSatir][k]
matris[maxSatir][k] = matris[i][k]
matris[i][k] = temp
for l in range(i + 1, boyut):
c = -matris[l][i] / matris[i][i]
for j in range(i, boyut + 1):
if i == j:
matris[l][j] = 0
else:
matris[l][j] += c * matris[i][j]
r_matris = [0 for i in range(boyut)]
for i in range(boyut - 1, -1, -1):
r_matris[i] = matris[i][boyut] / matris[i][i]
for k in range(i - 1, -1, -1):
matris[k][boyut] -= matris[k][i] * r_matris[i]
return r_matris
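# Quick self-check of the solver (illustrative; not part of the assignment):
# it solves x + y = 5, x - y = -1 from the augmented matrix, giving [2.0, 3.0].
assert gausselimination([[1.0, 1.0, 5.0], [1.0, -1.0, -1.0]]) == [2.0, 3.0]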
def korelasyon_ve_hata(x,y,n,katsayilar,m):
Sr,St,y_= 0,0,0
for i in y:
y_+= i
y_ /= len(y)
for i in range(n):
Sr_1=0
St += (y[i]-y_)**2
Sr_1 += y[i]-katsayilar[0]
for j in range(1,m+1):
Sr_1 -= katsayilar[j]*(x[i]**j)
Sr_1 = Sr_1**2
Sr+=Sr_1
    S_y_x = math.sqrt(abs(Sr/(n-(m+1)))) # standard error of the estimate
    r = math.sqrt(abs((St-Sr)/St)) # correlation coefficient
return r,S_y_x
def enuygunhesapla(x,y,dosya):
korel = []
dosya.write('------------------------------------------------------- \n')
for i in range(1,7):
matris = matris_olustur(x,y,len(y),i)
katsayılar = gausselimination(matris)
korel.append(korelasyon_ve_hata(x,y,len(y),katsayılar,i))
    # track the largest and smallest correlation; w is the index of the best fit
    max,min,w = korel[0][0],korel[0][0],0
    for i in range(len(korel)):
        if korel[i][0] > max:
            max = korel[i][0]
            w = i
        if korel[i][0] < min:
            min = korel[i][0]
    print(f'largest correlation: {max}\nsmallest correlation: {min}\nbest fit: the degree-{w+1} polynomial \n')
bestmatriskatsayi = gausselimination(matris_olustur(x,y,len(y),w+1))
integ = integral(bestmatriskatsayi,len(y))
sembolikdenk = sembolikdenklem(bestmatriskatsayi)
    print(f'Equation : {sembolikdenk} \n an equation of degree {w+1} \n')
    print(f'Integral result with the polynomial: {integ} \n')
    polinomsuz = polinomsuzintegral(y)
    print(f'Integral result without the polynomial : {polinomsuz} \n')
def fonk (bestmatriskatsayi,x):
    denklem = bestmatriskatsayi
    # evaluate the fitted polynomial at x; works for any degree, not only 6
    asıldenk = sum(denklem[j] * x**j for j in range(len(denklem)))
    return asıldenk
def integral(bestmatriskatsayi,satirsayisi):
    baslangic = 1 # my student number is 170401021
bitis = satirsayisi
deltax = 0.01
integral =0
n = int((bitis-baslangic) / deltax)
for i in range(n):
integral += deltax* (fonk(bestmatriskatsayi,baslangic) + fonk(bestmatriskatsayi,baslangic+deltax)) / 2
baslangic = baslangic + deltax
return integral
def sembolikdenklem(denklem):
    x = syp.symbols('x')
    # build the symbolic polynomial for any degree, not only 6
    sd = sum(denklem[j] * x**j for j in range(len(denklem)))
    return sd
def polinomsuzintegral(data):
a = 1
b = len(data)
deltax = 1
integral = 0
n = int((b-a)/deltax)
for i in range(n-1):
integral+= deltax * (data[a] + data[a+deltax])/2
a+=deltax
return integral
def yorumyap(sonuc):
    sonuc.write('The polynomial we found does not reproduce the real data exactly; on top of that we take a numerical integral, and \n'
                'a numerical integral carries an error margin depending on how large or small delta is. In the integral over the raw data delta is 1, while for the polynomial I take 0.1.\n '
                'That is why the results come out different. The smaller delta is, the closer we get to the true result.')
sonuc.close()
x,y = oku()
sonuc = open('170401021_yorum.txt','w')
enuygunhesapla(x,y,sonuc)
yorumyap(sonuc)
| 32.958904 | 182 | 0.552369 |
aced4602e7cafcd2177580188c2c83fa7caf7998 | 122 | py | Python | splitio/__init__.py | splitio/python-client | a8aac43f7f82c02c043d9291a509b2212e8d9734 | [
"Apache-2.0"
] | 13 | 2017-03-17T15:15:20.000Z | 2022-03-14T22:24:10.000Z | splitio/__init__.py | splitio/python-client | a8aac43f7f82c02c043d9291a509b2212e8d9734 | [
"Apache-2.0"
] | 81 | 2017-01-12T23:06:48.000Z | 2022-02-21T18:20:23.000Z | splitio/__init__.py | splitio/python-client | a8aac43f7f82c02c043d9291a509b2212e8d9734 | [
"Apache-2.0"
] | 14 | 2017-05-25T10:49:13.000Z | 2021-12-27T16:39:20.000Z | from splitio.client.factory import get_factory
from splitio.client.key import Key
from splitio.version import __version__
| 30.5 | 46 | 0.860656 |
aced471868fcbfda3fa52f5cd86517ef8d91c96b | 8 | py | Python | config/addarg.py | xu-hao/pdspi-mapper-parallex-example | 8b81558213d58d5fc06e938951fde099b41b9cae | [
"MIT"
] | null | null | null | config/addarg.py | xu-hao/pdspi-mapper-parallex-example | 8b81558213d58d5fc06e938951fde099b41b9cae | [
"MIT"
] | null | null | null | config/addarg.py | xu-hao/pdspi-mapper-parallex-example | 8b81558213d58d5fc06e938951fde099b41b9cae | [
"MIT"
] | 2 | 2020-05-02T20:32:27.000Z | 2020-07-18T05:39:24.000Z | yield t
| 4 | 7 | 0.75 |
aced4811798fb4e81193f28e32ae308d6aebd572 | 474 | py | Python | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/_bordercolor.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="indicator.gauge", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 31.6 | 80 | 0.64557 |
aced49c2205910aef3bc47b013c7327a169d1b36 | 355 | py | Python | fyle/platform/apis/v1beta/spender/reports.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | 1 | 2022-03-08T09:43:30.000Z | 2022-03-08T09:43:30.000Z | fyle/platform/apis/v1beta/spender/reports.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | 2 | 2021-11-22T09:12:12.000Z | 2022-03-17T10:13:40.000Z | fyle/platform/apis/v1beta/spender/reports.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | null | null | null | """
V1 Beta Spender Reports
"""
from ....internals.list_all_resources import ListAllResources
from ....internals.list_resources import ListResources
class Reports(ListResources, ListAllResources):
"""Class for Report APIs."""
REPORTS = '/reports'
def __init__(self, version, role):
super().__init__(version, role, Reports.REPORTS)
| 23.666667 | 61 | 0.721127 |
aced49c287f3b97d1d35fc280175963b8bb4272c | 12,911 | py | Python | spSudoku/main.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | spSudoku/main.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | spSudoku/main.py | lightofanima/Adventures | efb9d001520ff0b88746d8b3cf024de3307e45c7 | [
"MIT"
] | null | null | null | #import main dependencies
import pygame, random, time, shlex
from subprocess import Popen, PIPE
#note: Fix text size!
#initialise window:
pygame.init()
pygame.font.init()
size = (540,540)
pygame.display.set_caption("Sudoku")
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
#initialise font and generate the information needed for drawing text:
font = pygame.font.SysFont('Calibri', 75, False, False)
playerLetterColour = (0,0,0)
#this is an array that represents the various possible texts to be found in the grid.
#first 10 numbers represent pre-generated values.
#next 10 represent user chosen values
text = [
font.render(" ",True,playerLetterColour),
font.render("1",True,playerLetterColour),
font.render("2",True,playerLetterColour),
font.render("3",True,playerLetterColour),
font.render("4",True,playerLetterColour),
font.render("5",True,playerLetterColour),
font.render("6",True,playerLetterColour),
font.render("7",True,playerLetterColour),
font.render("8",True,playerLetterColour),
font.render("9",True,playerLetterColour),
font.render(" ",True,(255,0,0)),
font.render("1",True,(230,20,0)),
font.render("2",True,(150,200,0)),
font.render("3",True,(50,100,120)),
font.render("4",True,(0,255,0)),
font.render("5",True,(0,200,50)),
font.render("6",True,(0,100,250)),
font.render("7",True,(20,0,200)),
font.render("8",True,(20,40,150)),
font.render("9",True,(60,180,0))
]
#an object to represent each small square in the grid
class Square:
value = 0
settable = True
#initialises the square to a given value.
def __init__(self,v):
self.value=v
#returns the number that represents the current value of the square.
def get(self):
return self.value % 10
#sets the value of the square if it is settable by the user.
def set(self,v):
if self.settable:
self.value = 10+v
#sets settability
def setSettability(self,t):
self.settable = t
def isBlank(self):
return self.get() == 0
def markAsPreGenerated(self):
self.setSettability(False)
self.value = self.value-10
#draws the square at the location x,y.
def draw(self, x, y):
screen.blit(text[self.value], [size[0]/9*x+17,size[1]/9*y+2])
#an object to represent the global sudoku grid.
class Grid:
grid = [[]] #grid drawn in transpose for convenience.
mBorderSmall = 0
mBorderLarge = 0
cursorPos = (4,4)
cursorWidth = 5
invalidMessage = font.render("Invalid solution!", True, (255,0,0))
validMessage = font.render("Valid solution!", True, (20,190,20))
shouldPrintMessage = False
boardValid = False
blankSquaresToHave = 10
    #initialises the grid, setting the border thicknesses.
def __init__(self, borders, borderl):
self.mBorderSmall = borders
self.mBorderLarge = borderl
self.generateGrid()
#returns True of the grid is valid. False otherwise (according to Sudoku rules)
def validateGrid(self):
for x in range (9):
for y in range(9):
if not self.validateSquare(x,y):
return False
return True
def validateSquare(self,x,y):
return self.grid[x][y].get()!=0 and self.checkHorizontalConstraint(x,y)==1 and self.checkVerticalConstraint(x,y)==1 and self.checkSectionConstraint(x,y)==1
    #Returns 1 if passed game rules. More if too many. Less if no occurrence.
def checkHorizontalConstraint(self, x, y):
numberOfN = 0
n = self.grid[x][y].get()
for i in range (0, 9):
if self.grid[x][i].get() == n:
numberOfN = numberOfN+1
return numberOfN
    #Returns 1 if passed game rules. More if too many. Less if no occurrence.
def checkVerticalConstraint(self, x, y):
numberOfN = 0
n = self.grid[x][y].get()
for i in range (0, 9):
if self.grid[i][y].get() == n:
numberOfN = numberOfN+1
return numberOfN
    #Returns 1 if passed game rules. More if too many. Less if no occurrence.
def checkSectionConstraint(self, x, y):
sectionCoord = (x//3, y//3)
n = self.grid[x][y].get()
numberOfN=0
for i in range (3*sectionCoord[0], 3*sectionCoord[0]+3):
for j in range(3*sectionCoord[1], 3*sectionCoord[1]+3):
if self.grid[i][j].get() == n:
numberOfN = numberOfN + 1
return numberOfN
#returns a list of empty squares in [(x,y)] format
def findEmptySquares(self):
out = []
for i in range(9):
for j in range(9):
if self.grid[i][j].get()==0:
out.append((i,j))
return out
###############################################################################################################################
#returns false if a constraint is broken. Returns true even if not all spaces are filled out.
def gen_checkConstraints(self, x, y):
return self.checkHorizontalConstraint(x,y)<2 and self.checkVerticalConstraint(x,y)<2 and self.checkSectionConstraint(x,y)<2
    #solves the board by backtracking over the empty squares; candidate values are shuffled for variety.
def solve(self):
#Find all empty squares.
empty = self.findEmptySquares()
#if nothing to do
if len(empty) == 0:
return
#use recursive function:
solution = self.solve_rec(empty)
#apply to grid
for i in range(len(solution[1])):
self.grid[empty[i][0]][empty[i][1]].set(solution[1][i])
def solve_rec(self, remainingEmpty):
current = remainingEmpty[0] #should have >1 elements
#Find possible values that it could take.
possibleValues = []
square = self.grid[current[0]][current[1]]
for i in range(1,10):
square.set(i)
if self.gen_checkConstraints(current[0], current[1]):
possibleValues.append(i)
square.set(0) #reset to default
random.shuffle(possibleValues) #should help make it more random
#impossible
if len(possibleValues) == 0:
return (False, [-1])
#if done, should have only one solution:
if len(remainingEmpty) == 1:
if len(possibleValues) == 1:
return (True, [possibleValues[0]])
else:
return (False,[-2])
#If not done but possibility, try possibility n until it's valid.
for pv in possibleValues:
square.set(pv)
recAnswer = self.solve_rec(remainingEmpty[1:])
square.set(0)
#if valid, return:
if recAnswer[0] == True:
return (True, [pv] + recAnswer[1])
#if reached here, no possibilities
return (False, [-1])
#Solves the board using an external C++ solver.
def externalSolve(self, programName):
args = "./"+programName
for i in range(9):
for j in range(9):
args = args + " " + str(self.grid[i][j].get())
a = shlex.split(args)
proc = Popen(a, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
        emptyMapping = shlex.split(out.decode())  # communicate() returns bytes on Python 3
blankIndex = 0
for i in range(9):
for j in range(9):
if self.grid[i][j].get()==0:
self.grid[i][j].set(int(emptyMapping[blankIndex]))
blankIndex = blankIndex+1
###############################################################################################################################
#generates a new grid
def generateGrid(self, freeSpaces = 0):
self.grid = [[Square(0) for x in range(9)] for y in range(9)]
t0 = time.time()
#self.externalSolve("randomsolver")
self.solve()
t1 = time.time()
self.clearRandomSquares(self.blankSquaresToHave)
self.setPermissions()
print("Time Taken: " + str(t1-t0) + " s")
def clearRandomSquares(self, noToClear):
noToClear = min(81,noToClear);
noToClear = max(0,noToClear);
randCoords = [(x,y) for x in range(9) for y in range(9)]
random.shuffle(randCoords)
for i in range(noToClear):
self.grid[randCoords[i][0]][randCoords[i][1]].set(0)
def setPermissions(self):
for i in range(9):
for j in range(9):
if not self.grid[i][j].isBlank():
self.grid[i][j].markAsPreGenerated()
#draws the borders/lines to convey the grid.
def drawBorders(self):
for i in range(1,3):
pygame.draw.rect(screen, (0,0,0), [i*(size[0]//3)-(self.mBorderLarge//2), 0, self.mBorderLarge, size[1]])
pygame.draw.rect(screen, (0,0,0), [0, i*(size[0]//3)-(self.mBorderLarge//2), size[0], self.mBorderLarge])
for i in range (3): #go through squares
for j in range(1,3): #go through lines
pygame.draw.rect(screen, (0,0,0), [(size[0]//3)*i + (size[0]//9)*j, 0, self.mBorderSmall, size[1]])
pygame.draw.rect(screen, (0,0,0), [0, (size[1]//3)*i + (size[1]//9)*j, size[0], self.mBorderSmall])
#draws the numbers in the grid.
def drawDigits(self):
for i in range (9):
for j in range(9):
self.grid[j][i].draw(i,j)
#draws the cursor that represents the curently selected tile.
def drawCursor(self):
colour = (0, 255, 150)
pygame.draw.rect(screen, colour, [self.cursorPos[0]*(size[0]//9),self.cursorPos[1]*(size[1]//9),
self.cursorWidth, size[1]//9]) #vertical 1
pygame.draw.rect(screen, colour, [(self.cursorPos[0]+1)*(size[0]//9)-self.cursorWidth,self.cursorPos[1]*(size[1]//9),
self.cursorWidth, size[1]//9]) #vertical 2
pygame.draw.rect(screen, colour, [self.cursorPos[0]*(size[0]//9),self.cursorPos[1]*(size[1]//9),
size[1]//9,self.cursorWidth]) #horizontal 1
pygame.draw.rect(screen, colour, [self.cursorPos[0]*(size[0]//9),(self.cursorPos[1]+1)*(size[1]//9)-self.cursorWidth,
size[1]//9,self.cursorWidth]) #horizontal 2
#draws either an error message or a success message, depending on whether the grid validity flag is set to True.
def drawAnswerConfirmation(self):
xboxdivisor = 8
yboxdivisor = 4
#source: http://stackoverflow.com/questions/6339057/draw-a-transparent-rectangle-in-pygame
s = pygame.Surface((size[0]-size[0]/(xboxdivisor/2),
size[1]-size[1]/(yboxdivisor/2))) # the size of your rect
s.set_alpha(220) # alpha level
s.fill((10,10,80)) # this fills the entire surface
screen.blit(s, (size[0]/xboxdivisor,size[1]/yboxdivisor)) # (0,0) are the top-left coordinates
if self.boardValid:
screen.blit(self.validMessage, [95,size[1]/2-30])
else:
screen.blit(self.invalidMessage, [75,size[1]/2-30])
#validates the grid and triggers a message to confirm/deny validity.
#resets game if won and pressed again.
def submitAnswer(self):
if self.shouldPrintMessage:
if self.boardValid == True:
self.generateGrid()
self.shouldPrintMessage = not self.shouldPrintMessage
if self.shouldPrintMessage:
self.boardValid = self.validateGrid()
#Sets the currently selected tile to the given value.
def setTileAtCursor(self, newValue):
if self.shouldPrintMessage:
return
self.grid[self.cursorPos[1]][self.cursorPos[0]].set(newValue)
#updates the currently selected tile based on the mouse location.
def updateCursor(self, screenPos):
if self.shouldPrintMessage:
return
self.cursorPos = (screenPos[0]//(size[0]//9), screenPos[1]//(size[1]//9))
#moves the currently selected tile, keeping it inside the sudoku grid.
def moveCursor(self, directionX, directionY):
if self.shouldPrintMessage:
return
self.cursorPos = (self.cursorPos[0]+directionX, self.cursorPos[1]+directionY)
if self.cursorPos[0] < 0 or self.cursorPos[1] < 0 or self.cursorPos[0] > 8 or self.cursorPos[1] > 8:
self.moveCursor(-directionX, -directionY)
#draws all the components of the grid.
def draw(self):
width = size[0]/9
height = size[1]/9
for i in range(9):
for j in range(9):
pygame.draw.rect(screen, (255,255,255), [i*width, j*height, width,height])
self.drawBorders()
self.drawCursor()
self.drawDigits()
if self.shouldPrintMessage:
self.drawAnswerConfirmation()
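#illustrative sketch (not in the original game): the Grid logic can also be
#exercised without rendering, e.g. to confirm a generated board is solvable:
#   g = Grid(4,8); g.solve(); assert g.validateGrid()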
#main game execution loop:
done = False #should we stop?
grid = Grid(4,8) #the grid defind above is created
#main loop
while not done:
#check for user input and call appropriate functions.
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.MOUSEBUTTONDOWN:
grid.updateCursor(pygame.mouse.get_pos())
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_1:
grid.setTileAtCursor(1)
elif event.key == pygame.K_2:
grid.setTileAtCursor(2)
elif event.key == pygame.K_3:
grid.setTileAtCursor(3)
elif event.key == pygame.K_4:
grid.setTileAtCursor(4)
elif event.key == pygame.K_5:
grid.setTileAtCursor(5)
elif event.key == pygame.K_6:
grid.setTileAtCursor(6)
elif event.key == pygame.K_7:
grid.setTileAtCursor(7)
elif event.key == pygame.K_8:
grid.setTileAtCursor(8)
elif event.key == pygame.K_9:
grid.setTileAtCursor(9)
elif event.key == pygame.K_0:
grid.setTileAtCursor(0)
elif event.key == pygame.K_UP or event.key == pygame.K_w:
grid.moveCursor(0,-1)
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
grid.moveCursor(0,1)
elif event.key == pygame.K_LEFT or event.key == pygame.K_a:
grid.moveCursor(-1,0)
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
grid.moveCursor(1,0)
elif event.key == pygame.K_RETURN:
grid.submitAnswer()
#draw and continue:
screen.fill((0,0,0))
grid.draw()
pygame.display.flip()
clock.tick(60)
#shutdown pygame:
pygame.quit() | 29.343182 | 157 | 0.667338 |
aced4a39f4f5cf6dea86732b2362bbb18206e54c | 5,688 | py | Python | kvgui.py | lutet88/pyQt5-keypress-visualizer | 99c5d07855ab35cc7a11b5a51dd55345623633c5 | [
"MIT"
] | null | null | null | kvgui.py | lutet88/pyQt5-keypress-visualizer | 99c5d07855ab35cc7a11b5a51dd55345623633c5 | [
"MIT"
] | null | null | null | kvgui.py | lutet88/pyQt5-keypress-visualizer | 99c5d07855ab35cc7a11b5a51dd55345623633c5 | [
"MIT"
] | null | null | null | # kvgui.py (main GUI script and controller, for now)
# for PyQt5-keyboard-visualizer by lutet88
import sys
from PySide6.QtCore import Qt, QTimer
from PySide6.QtWidgets import QApplication, QLabel, QMainWindow
from PySide6.QtGui import QPixmap, QFont, QFontDatabase
import config_parser
import keyboard_listener
version = "0.0.3a"
def createApplication():
return QApplication(sys.argv)
def switchModes(mode):
# gets numeric value else default 0
return {"none":0, "count":1, "name":2, "both":3}.get(mode.lower(), 0)
class MainGUI(QMainWindow):
# initialize GUI
def __init__(self, config, app, *args, **kwargs):
print("[KVGUI] Initializing... (version v{version})".format(version=version))
super(MainGUI, self).__init__(*args, **kwargs)
print("[KVGUI] QMainWindow initialized.")
self.app = app
self.initVars(config)
self.initGUI()
self.createFonts()
self.initKeyboard()
if cps_enable:
self.initCPS()
print("[KVGUI] GUI init complete. Application Launching...")
self.initTimer()
def initVars(self, config):
global tilesize, width, height, num_keys, keys, keyimage, keyimage_dark, keyimage_maps, \
font, fontsize, fontcolor, backgroundcolor, displaymode, resetkey, pollingrate, cps_enable, \
cps_x, cps_y
v = config_parser.loadConfig(config)
tilesize, width, height = v["tilesize"], v["windowwidth"], v["windowheight"]
num_keys = width * height
keys, keyimage, keyimage_dark = v["keys"], v["keyimage"], v["keyimage_dark"]
keyimage_maps = [QPixmap(keyimage).scaled(tilesize, tilesize), QPixmap(keyimage_dark).scaled(tilesize, tilesize)]
font, fontsize, fontcolor = v["font"], v["fontsize"], v["fontcolor"]
backgroundcolor = v["backgroundcolor"]
displaymode = switchModes(v["displaymode"])
resetkey = v["resetkey"]
pollingrate = 1000 / v["pollingratehz"]
cps_enable = v["cps-enable"]
cps_x = v["cps-x"]
cps_y = v["cps-y"]
def initGUI(self):
self.setWindowTitle("keyboard visualizer v{version}".format(version=version))
self.setDimensions(tilesize * width, tilesize * height)
self.setBGColor(backgroundcolor)
def initKeyboard(self):
self.keylabels = [None for i in range(len(keys))]
self.textlabels = [None for i in range(len(keys))]
self.kl = keyboard_listener.KeyboardListener(len(keys), resetkey)
self.addKeys()
def initTimer(self):
self.timer = QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(pollingrate)
print("[KVGUI] Refresh timer started with interval "+str(pollingrate)+"ms")
def createFonts(self):
text_font = QFontDatabase.addApplicationFont(font)
self.text_font = QFont(QFontDatabase.applicationFontFamilies(text_font)[0], fontsize)
def initCPS(self):
cps = QLabel(self)
cps.setGeometry(0, 0, tilesize, tilesize)
cps.move(tilesize * cps_x, tilesize * cps_y)
cps.setAlignment(Qt.AlignCenter)
cps.setText("0")
cps.setFont(self.text_font)
cps.setStyleSheet("background-color: rgba(0,0,0,0); color : "+fontcolor+";")
self.cps = cps
print("[KVGUI] CPS system initialized")
def update(self):
self.timer.stop()
self.kl.update()
self.updateKeys()
if cps_enable:
self.updateCPS()
self.show()
self.timer.start(pollingrate)
def setBGColor(self, color):
self.setStyleSheet("background-color: " + color + ";")
def setDimensions(self, width, height):
self.setGeometry(100, 100, width, height)
def addKeys(self):
for id in range(len(keys)):
self.addKey(id, keys[id])
def addKey(self, id, key):
if key["enabled"]:
# enable keycode
self.kl.setKeyCode(id, str(key["keyCode"]))
# image element (background)
q = QLabel(self)
q.setGeometry(0, 0, tilesize, tilesize)
q.move(tilesize * key["x"], tilesize * key["y"])
q.setPixmap(QPixmap(keyimage).scaled(tilesize, tilesize))
# text element
t = QLabel(self)
t.setGeometry(0, 0, tilesize, tilesize)
t.move(tilesize * key["x"], tilesize * key["y"])
t.setAlignment(Qt.AlignCenter)
t.setText("")
t.setFont(self.text_font)
t.setStyleSheet("background-color: rgba(0,0,0,0); color : "+fontcolor+";")
# add to instance
self.keylabels[id] = q
self.textlabels[id] = t
else:
self.kl.setKeyCode(id, None)
def updateKeys(self):
for id in range(len(keys)):
self.updateKey(id, keys[id])
def updateKey(self, id, key):
if key["enabled"]:
q = self.keylabels[id]
q.setPixmap(keyimage_maps[1 if self.kl.pressed[id] else 0])
text = str(key["name"]) if displaymode & 0b10 else ""
text += "\n" if displaymode == 0b11 else ""
text += str(self.kl.counts[id]) if displaymode & 0b01 else ""
self.textlabels[id].setText(text)
def updateCPS(self):
self.cps.setText(f"{self.kl.updateCPS()}")
def closeEvent(self, event):
print("[KVGUI] Stopping...")
self.kl.stopListening()
event.accept()
self.close()
self.app.quit()
exit()
if __name__ == "__main__":
# test MainGUI
app = createApplication()
m = MainGUI("config/config.yml")
app.exec()
| 34.682927 | 121 | 0.607243 |
aced4a3e538e80e5e4a3de1f3a5a7a0d2d3f3904 | 2,031 | py | Python | bot/plugins/plugin_calculate.py | Fogapod/ChatBot_UI | 872a3552cebcc110f07d57721a38787252f4d573 | [
"MIT"
] | 26 | 2017-04-24T09:51:42.000Z | 2022-01-27T01:35:08.000Z | bot/plugins/plugin_calculate.py | Fogapod/ChatBot_UI | 872a3552cebcc110f07d57721a38787252f4d573 | [
"MIT"
] | null | null | null | bot/plugins/plugin_calculate.py | Fogapod/ChatBot_UI | 872a3552cebcc110f07d57721a38787252f4d573 | [
"MIT"
] | 10 | 2017-04-01T04:28:02.000Z | 2020-12-22T19:51:18.000Z | # coding:utf8
import math
import re
class Plugin(object):
    __doc__ = '''This plugin is intended for evaluating the result of a mathematical expression.
    Using it requires access level {protection} or higher
    Keywords: [{keywords}]
    Usage: {keyword} <expression>
    Example: {keyword} (3/4) * PI * 7^3'''
name = 'calculate'
keywords = (name, u'посчитай', '=')
protection = 0
argument_required = True
def respond(self, msg, rsp, *args, **kwargs):
expression = ''.join(msg.args[1:]).lower()
if re.match(u'^([\d+\-*/%:().,^√πe]|(sqrt)|(pi))+$', expression):
expression = re.sub(u'(sqrt)|√', 'math.sqrt', expression)
expression = re.sub(u'(pi)|π', 'math.pi', expression)
expression = re.sub(u':|÷', '/', expression)
expression = re.sub('e', 'math.e', expression)
expression = re.sub('\^', '**', expression)
expression = re.sub(',', '.', expression)
expression = re.sub('(?P<int>(?<![\d.])\d+(?![\d.]))',
'\g<int>.', expression)
try:
result = eval(expression)
            except SyntaxError:
                result = u'Error [0]\n' + expression
            except NameError:
                result = u'Error [1]\n' + expression
            except AttributeError:
                result = u'Error [2]\n' + expression
            except TypeError:
                result = u'Error [3]\n' + expression
            except ZeroDivisionError:
                result = u'Division by 0'
            except OverflowError:
                result = 'Infinite'
            else:
                if type(result) not in (int, long, float):
                    result = u'Not a mathematical operation'
                else:
                    result = str(result)
        else:
            result = u'Not a mathematical operation'
rsp.text = result
return rsp
| 35.017241 | 89 | 0.497784 |
aced4a9af882a4b676cf254414010d01110ee979 | 16,756 | py | Python | memory_saving_gradients.py | ChenChuang/gradient-checkpointing | 62d5a67b457777cc548db17ab212fb390b1096bb | [
"MIT"
] | null | null | null | memory_saving_gradients.py | ChenChuang/gradient-checkpointing | 62d5a67b457777cc548db17ab212fb390b1096bb | [
"MIT"
] | null | null | null | memory_saving_gradients.py | ChenChuang/gradient-checkpointing | 62d5a67b457777cc548db17ab212fb390b1096bb | [
"MIT"
] | null | null | null | from toposort import toposort
import contextlib
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
import time
import sys
sys.setrecursionlimit(10000)
# refers back to current module if we decide to split helpers out
util = sys.modules[__name__]
# getting rid of "WARNING:tensorflow:VARIABLES collection name is deprecated"
setattr(tf.GraphKeys, "VARIABLES", "variables")
# save original gradients since tf.gradient could be monkey-patched to point
# to our version
from tensorflow.python.ops import gradients as tf_gradients_lib
tf_gradients = tf_gradients_lib.gradients
MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing
# specific versions we can use to do process-wide replacement of tf.gradients
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)
def gradients_memory(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)
def gradients_collection(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)
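# Typical usage sketch (mirrors the project's README; assumes TF 1.x): replace
# tf.gradients process-wide so existing training code picks up the
# memory-saving version without further changes.
#
#   import tensorflow as tf
#   import memory_saving_gradients
#   tf.__dict__["gradients"] = memory_saving_gradients.gradients_memory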
def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):
'''
Authors: Tim Salimans & Yaroslav Bulatov
memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost"
by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)
ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients
(https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)
'checkpoints' can either be
- a list consisting of tensors from the forward pass of the neural net
that we should re-use when calculating the gradients in the backward pass
all other tensors that do not appear in this list will be re-computed
- a string specifying how this list should be determined. currently we support
- 'speed': checkpoint all outputs of convolutions and matmuls. these ops are usually the most expensive,
so checkpointing them maximizes the running speed
(this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)
- 'memory': try to minimize the memory usage
(currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)
- 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint
'''
# print("Calling memsaving gradients with", checkpoints)
if not isinstance(ys,list):
ys = [ys]
if not isinstance(xs,list):
xs = [xs]
bwd_ops = ge.get_backward_walk_ops([y.op for y in ys],
inclusive=True)
debug_print("bwd_ops: %s", bwd_ops)
# forward ops are all ops that are candidates for recomputation
fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],
inclusive=True,
within_ops=bwd_ops)
debug_print("fwd_ops: %s", fwd_ops)
# exclude ops with no inputs
fwd_ops = [op for op in fwd_ops if op._inputs]
# don't recompute xs, remove variables
xs_ops = _to_ops(xs)
fwd_ops = [op for op in fwd_ops if not op in xs_ops]
fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/read' in op.name]
ts_all = ge.filter_ts(fwd_ops, True) # get the tensors
ts_all = [t for t in ts_all if '/read' not in t.name]
ts_all = set(ts_all) - set(xs) - set(ys)
# construct list of tensors to checkpoint during forward pass, if not
# given as input
if type(checkpoints) is not list:
if checkpoints == 'collection':
checkpoints = tf.get_collection('checkpoints')
elif checkpoints == 'speed':
# checkpoint all expensive ops to maximize running speed
checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif checkpoints == 'memory':
# remove very small tensors and some weird ops
def fixdims(t): # tf.Dimension values are not compatible with int, convert manually
try:
return [int(e if e.value is not None else 64) for e in t]
except:
return [0] # unknown shape
ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE]
ts_all = [t for t in ts_all if 'L2Loss' not in t.name]
ts_all = [t for t in ts_all if 'entropy' not in t.name]
ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name]
ts_all = [t for t in ts_all if 'Switch' not in t.name]
ts_all = [t for t in ts_all if 'dropout' not in t.name]
# filter out all tensors that are inputs of the backward graph
with util.capture_ops() as bwd_ops:
tf_gradients(ys, xs, grad_ys, **kwargs)
bwd_inputs = [t for op in bwd_ops for t in op.inputs]
# list of tensors in forward graph that is in input to bwd graph
ts_filtered = list(set(bwd_inputs).intersection(ts_all))
debug_print("Using tensors %s", ts_filtered)
# try two slightly different ways of getting bottlenecks tensors
# to checkpoint
for ts in [ts_filtered, ts_all]:
# get all bottlenecks in the graph
bottleneck_ts = []
for t in ts:
b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
# check that there are not shortcuts
b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):
bottleneck_ts.append(t) # we have a bottleneck!
else:
debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))
# success? or try again without filtering?
if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!
break
if not bottleneck_ts:
raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".')
# sort the bottlenecks
bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)
sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]
# save an approximately optimal number ~ sqrt(N)
N = len(ts_filtered)
if len(bottleneck_ts) <= np.ceil(np.sqrt(N)):
checkpoints = sorted_bottlenecks
else:
step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N)))
checkpoints = sorted_bottlenecks[step::step]
else:
raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,))
checkpoints = list(set(checkpoints).intersection(ts_all))
# at this point automatic selection happened and checkpoints is list of nodes
assert isinstance(checkpoints, list)
debug_print("Checkpoint nodes used: %s", checkpoints)
# better error handling of special cases
# xs are already handled as checkpoint nodes, so no need to include them
xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))
if xs_intersect_checkpoints:
debug_print("Warning, some input nodes are also checkpoint nodes: %s",
xs_intersect_checkpoints)
ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))
debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints,
ys_intersect_checkpoints)
# saving an output node (ys) gives no benefit in memory while creating
# new edge cases, exclude them
if ys_intersect_checkpoints:
debug_print("Warning, some output nodes are also checkpoints nodes: %s",
format_ops(ys_intersect_checkpoints))
# remove initial and terminal nodes from checkpoints list if present
checkpoints = list(set(checkpoints) - set(ys) - set(xs))
# check that we have some nodes to checkpoint
if not checkpoints:
raise Exception('no checkpoints nodes found or given as input! ')
# disconnect dependencies between checkpointed tensors
checkpoints_disconnected = {}
for x in checkpoints:
if x.op and x.op.name is not None:
grad_node = tf.stop_gradient(x, name=x.op.name+"_sg")
else:
grad_node = tf.stop_gradient(x)
checkpoints_disconnected[x] = grad_node
# partial derivatives to the checkpointed tensors and xs
ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],
stop_at_ts=checkpoints, within_ops=fwd_ops)
debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
debug_print("ops_to_copy = %s", ops_to_copy)
debug_print("Processing list %s", ys)
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
# get gradients with respect to current boundary + original x's
copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]
boundary = list(checkpoints_disconnected.values())
dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", copied_ys)
debug_print("with respect to %s", boundary+xs)
inputs_to_do_before = [y.op for y in ys]
if grad_ys is not None:
inputs_to_do_before += grad_ys
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
# dictionary of "node: backprop" for nodes in the boundary
d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(),
dv[:len(checkpoints_disconnected)])}
# partial derivatives to xs (usually the params of the neural net)
d_xs = dv[len(checkpoints_disconnected):]
# incorporate derivatives flowing through the checkpointed nodes
checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)
for ts in checkpoints_sorted_lists[::-1]:
debug_print("Processing list %s", ts)
checkpoints_other = [r for r in checkpoints if r not in ts]
checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
# copy part of the graph below current checkpoint node, stopping at
# other checkpoints nodes
ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
debug_print("Found %s ops to copy within %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ts],
checkpoints_other)
debug_print("ops_to_copy = %s", ops_to_copy)
if not ops_to_copy: # we're done!
break
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected_other, checkpoints_other, copied_ops)
# gradient flowing through the checkpointed node
boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
substitute_backprops = [d_checkpoints[r] for r in ts]
dv = tf_gradients(boundary,
checkpoints_disconnected_other+xs,
grad_ys=substitute_backprops, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", boundary)
debug_print("with respect to %s", checkpoints_disconnected_other+xs)
debug_print("with boundary backprop substitutions %s", substitute_backprops)
inputs_to_do_before = [d_checkpoints[r].op for r in ts]
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):
if dr is not None:
if d_checkpoints[r] is None:
d_checkpoints[r] = dr
else:
d_checkpoints[r] += dr
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = d_xs_new[j]
else:
d_xs[j] += d_xs_new[j]
return d_xs
def tf_toposort(ts, within_ops=None):
all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)
deps = {}
for op in all_ops:
for o in op.outputs:
deps[o] = set(op.inputs)
sorted_ts = toposort(deps)
# only keep the tensors from our original list
ts_sorted_lists = []
for l in sorted_ts:
keep = list(set(l).intersection(ts))
if keep:
ts_sorted_lists.append(keep)
return ts_sorted_lists
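# Example (an illustrative sketch; the graph below is hypothetical): for a
# graph with c = a + b and d = c * c, tf_toposort([a, b, c, d]) returns the
# tensors grouped by topological level, e.g. [[a, b], [c], [d]], so every
# group depends only on the groups that precede it.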
def fast_backward_ops(within_ops, seed_ops, stop_at_ts):
bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))
ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts])
return list(ops)
@contextlib.contextmanager
def capture_ops():
"""Decorator to capture ops created in the block.
with capture_ops() as ops:
# create some ops
print(ops) # => prints ops created.
"""
micros = int(time.time()*10**6)
scope_name = str(micros)
op_list = []
with tf.name_scope(scope_name):
yield op_list
g = tf.get_default_graph()
op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, "op"):
return tensor_or_op.op
return tensor_or_op
def _to_ops(iterable):
if not _is_iterable(iterable):
return iterable
return [_to_op(i) for i in iterable]
def _is_iterable(o):
try:
_ = iter(o)
except Exception:
return False
return True
DEBUG_LOGGING = False
def debug_print(s, *args):
"""Like logger.log, but also replaces all TensorFlow ops/tensors with their
names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug
Usage:
debug_print("see tensors %s for %s", tensorlist, [1,2,3])
"""
if DEBUG_LOGGING:
formatted_args = [format_ops(arg) for arg in args]
print("DEBUG "+s % tuple(formatted_args))
def format_ops(ops, sort_outputs=True):
"""Helper method for printing ops. Converts Tensor/Operation op to op.name,
rest to str(op)."""
if hasattr(ops, '__iter__') and not isinstance(ops, str):
l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
if sort_outputs:
return sorted(l)
return l
else:
return ops.name if hasattr(ops, "name") else str(ops)
def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
for op in wait_to_do_ops:
ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]
ge.add_control_inputs(op, ci)
| 44.445623 | 139 | 0.655407 |
aced4ba4acd2a02aa958b0482859c77e43501ba9 | 20,002 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_4_5.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_4_5.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_4_5.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_VERSION_GL_4_5'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_VERSION_GL_4_5',error_checker=_errors._error_checker)
GL_BACK=_C('GL_BACK',0x0405)
GL_CLIP_DEPTH_MODE=_C('GL_CLIP_DEPTH_MODE',0x935D)
GL_CLIP_ORIGIN=_C('GL_CLIP_ORIGIN',0x935C)
GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT=_C('GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT',0x00000004)
GL_CONTEXT_LOST=_C('GL_CONTEXT_LOST',0x0507)
GL_CONTEXT_RELEASE_BEHAVIOR=_C('GL_CONTEXT_RELEASE_BEHAVIOR',0x82FB)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',0x82FC)
GL_GUILTY_CONTEXT_RESET=_C('GL_GUILTY_CONTEXT_RESET',0x8253)
GL_INNOCENT_CONTEXT_RESET=_C('GL_INNOCENT_CONTEXT_RESET',0x8254)
GL_LOSE_CONTEXT_ON_RESET=_C('GL_LOSE_CONTEXT_ON_RESET',0x8252)
GL_LOWER_LEFT=_C('GL_LOWER_LEFT',0x8CA1)
GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES=_C('GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES',0x82FA)
GL_MAX_CULL_DISTANCES=_C('GL_MAX_CULL_DISTANCES',0x82F9)
GL_NEGATIVE_ONE_TO_ONE=_C('GL_NEGATIVE_ONE_TO_ONE',0x935E)
GL_NONE=_C('GL_NONE',0)
GL_NO_ERROR=_C('GL_NO_ERROR',0)
GL_NO_RESET_NOTIFICATION=_C('GL_NO_RESET_NOTIFICATION',0x8261)
GL_QUERY_BY_REGION_NO_WAIT_INVERTED=_C('GL_QUERY_BY_REGION_NO_WAIT_INVERTED',0x8E1A)
GL_QUERY_BY_REGION_WAIT_INVERTED=_C('GL_QUERY_BY_REGION_WAIT_INVERTED',0x8E19)
GL_QUERY_NO_WAIT_INVERTED=_C('GL_QUERY_NO_WAIT_INVERTED',0x8E18)
GL_QUERY_TARGET=_C('GL_QUERY_TARGET',0x82EA)
GL_QUERY_WAIT_INVERTED=_C('GL_QUERY_WAIT_INVERTED',0x8E17)
GL_RESET_NOTIFICATION_STRATEGY=_C('GL_RESET_NOTIFICATION_STRATEGY',0x8256)
GL_TEXTURE_BINDING_1D=_C('GL_TEXTURE_BINDING_1D',0x8068)
GL_TEXTURE_BINDING_1D_ARRAY=_C('GL_TEXTURE_BINDING_1D_ARRAY',0x8C1C)
GL_TEXTURE_BINDING_2D=_C('GL_TEXTURE_BINDING_2D',0x8069)
GL_TEXTURE_BINDING_2D_ARRAY=_C('GL_TEXTURE_BINDING_2D_ARRAY',0x8C1D)
GL_TEXTURE_BINDING_2D_MULTISAMPLE=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE',0x9104)
GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY',0x9105)
GL_TEXTURE_BINDING_3D=_C('GL_TEXTURE_BINDING_3D',0x806A)
GL_TEXTURE_BINDING_BUFFER=_C('GL_TEXTURE_BINDING_BUFFER',0x8C2C)
GL_TEXTURE_BINDING_CUBE_MAP=_C('GL_TEXTURE_BINDING_CUBE_MAP',0x8514)
GL_TEXTURE_BINDING_CUBE_MAP_ARRAY=_C('GL_TEXTURE_BINDING_CUBE_MAP_ARRAY',0x900A)
GL_TEXTURE_BINDING_RECTANGLE=_C('GL_TEXTURE_BINDING_RECTANGLE',0x84F6)
GL_TEXTURE_TARGET=_C('GL_TEXTURE_TARGET',0x1006)
GL_UNKNOWN_CONTEXT_RESET=_C('GL_UNKNOWN_CONTEXT_RESET',0x8255)
GL_UPPER_LEFT=_C('GL_UPPER_LEFT',0x8CA2)
GL_ZERO_TO_ONE=_C('GL_ZERO_TO_ONE',0x935F)
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glBindTextureUnit(unit,texture):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLbitfield,_cs.GLenum)
def glBlitNamedFramebuffer(readFramebuffer,drawFramebuffer,srcX0,srcY0,srcX1,srcY1,dstX0,dstY0,dstX1,dstY1,mask,filter):pass
@_f
@_p.types(_cs.GLenum,_cs.GLuint,_cs.GLenum)
def glCheckNamedFramebufferStatus(framebuffer,target):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glClearNamedBufferData(buffer,internalformat,format,type,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLintptr,_cs.GLsizeiptr,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glClearNamedBufferSubData(buffer,internalformat,offset,size,format,type,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint,_cs.GLfloat,_cs.GLint)
def glClearNamedFramebufferfi(framebuffer,buffer,drawbuffer,depth,stencil):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint,arrays.GLfloatArray)
def glClearNamedFramebufferfv(framebuffer,buffer,drawbuffer,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint,arrays.GLintArray)
def glClearNamedFramebufferiv(framebuffer,buffer,drawbuffer,value):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint,arrays.GLuintArray)
def glClearNamedFramebufferuiv(framebuffer,buffer,drawbuffer,value):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum)
def glClipControl(origin,depth):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTextureSubImage1D(texture,level,xoffset,width,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTextureSubImage2D(texture,level,xoffset,yoffset,width,height,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glCompressedTextureSubImage3D(texture,level,xoffset,yoffset,zoffset,width,height,depth,format,imageSize,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLintptr,_cs.GLintptr,_cs.GLsizeiptr)
def glCopyNamedBufferSubData(readBuffer,writeBuffer,readOffset,writeOffset,size):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei)
def glCopyTextureSubImage1D(texture,level,xoffset,x,y,width):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glCopyTextureSubImage2D(texture,level,xoffset,yoffset,x,y,width,height):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glCopyTextureSubImage3D(texture,level,xoffset,yoffset,zoffset,x,y,width,height):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateBuffers(n,buffers):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateFramebuffers(n,framebuffers):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateProgramPipelines(n,pipelines):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glCreateQueries(target,n,ids):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateRenderbuffers(n,renderbuffers):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateSamplers(n,samplers):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glCreateTextures(target,n,textures):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateTransformFeedbacks(n,ids):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glCreateVertexArrays(n,arrays):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glDisableVertexArrayAttrib(vaobj,index):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glEnableVertexArrayAttrib(vaobj,index):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr)
def glFlushMappedNamedBufferRange(buffer,offset,length):pass
@_f
@_p.types(None,_cs.GLuint)
def glGenerateTextureMipmap(texture):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,ctypes.c_void_p)
def glGetCompressedTextureImage(texture,level,bufSize,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,ctypes.c_void_p)
def glGetCompressedTextureSubImage(texture,level,xoffset,yoffset,zoffset,width,height,depth,bufSize,pixels):pass
@_f
@_p.types(_cs.GLenum,)
def glGetGraphicsResetStatus():pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLint64Array)
def glGetNamedBufferParameteri64v(buffer,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetNamedBufferParameteriv(buffer,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLvoidpArray)
def glGetNamedBufferPointerv(buffer,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr,ctypes.c_void_p)
def glGetNamedBufferSubData(buffer,offset,size,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetNamedFramebufferAttachmentParameteriv(framebuffer,attachment,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetNamedFramebufferParameteriv(framebuffer,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetNamedRenderbufferParameteriv(renderbuffer,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLintptr)
def glGetQueryBufferObjecti64v(id,buffer,pname,offset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLintptr)
def glGetQueryBufferObjectiv(id,buffer,pname,offset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLintptr)
def glGetQueryBufferObjectui64v(id,buffer,pname,offset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLintptr)
def glGetQueryBufferObjectuiv(id,buffer,pname,offset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetTextureImage(texture,level,format,type,bufSize,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,arrays.GLfloatArray)
def glGetTextureLevelParameterfv(texture,level,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLenum,arrays.GLintArray)
def glGetTextureLevelParameteriv(texture,level,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetTextureParameterIiv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray)
def glGetTextureParameterIuiv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetTextureParameterfv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetTextureParameteriv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetTextureSubImage(texture,level,xoffset,yoffset,zoffset,width,height,depth,format,type,bufSize,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,arrays.GLint64Array)
def glGetTransformFeedbacki64_v(xfb,pname,index,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,arrays.GLintArray)
def glGetTransformFeedbacki_v(xfb,pname,index,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetTransformFeedbackiv(xfb,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLint64Array)
def glGetVertexArrayIndexed64iv(vaobj,index,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetVertexArrayIndexediv(vaobj,index,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetVertexArrayiv(vaobj,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetnColorTable(target,format,type,bufSize,table):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,ctypes.c_void_p)
def glGetnCompressedTexImage(target,lod,bufSize,pixels):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetnConvolutionFilter(target,format,type,bufSize,image):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLboolean,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetnHistogram(target,reset,format,type,bufSize,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,arrays.GLdoubleArray)
def glGetnMapdv(target,query,bufSize,v):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,arrays.GLfloatArray)
def glGetnMapfv(target,query,bufSize,v):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLsizei,arrays.GLintArray)
def glGetnMapiv(target,query,bufSize,v):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLboolean,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetnMinmax(target,reset,format,type,bufSize,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLfloatArray)
def glGetnPixelMapfv(map,bufSize,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glGetnPixelMapuiv(map,bufSize,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLushortArray)
def glGetnPixelMapusv(map,bufSize,values):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLubyteArray)
def glGetnPolygonStipple(bufSize,pattern):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p,_cs.GLsizei,ctypes.c_void_p,ctypes.c_void_p)
def glGetnSeparableFilter(target,format,type,rowBufSize,row,columnBufSize,column,span):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glGetnTexImage(target,level,format,type,bufSize,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glGetnUniformdv(program,location,bufSize,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLfloatArray)
def glGetnUniformfv(program,location,bufSize,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLintArray)
def glGetnUniformiv(program,location,bufSize,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLsizei,arrays.GLuintArray)
def glGetnUniformuiv(program,location,bufSize,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glInvalidateNamedFramebufferData(framebuffer,numAttachments,attachments):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glInvalidateNamedFramebufferSubData(framebuffer,numAttachments,attachments,x,y,width,height):pass
@_f
@_p.types(ctypes.c_void_p,_cs.GLuint,_cs.GLenum)
def glMapNamedBuffer(buffer,access):pass
@_f
@_p.types(ctypes.c_void_p,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr,_cs.GLbitfield)
def glMapNamedBufferRange(buffer,offset,length,access):pass
@_f
@_p.types(None,_cs.GLbitfield)
def glMemoryBarrierByRegion(barriers):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizeiptr,ctypes.c_void_p,_cs.GLenum)
def glNamedBufferData(buffer,size,data,usage):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizeiptr,ctypes.c_void_p,_cs.GLbitfield)
def glNamedBufferStorage(buffer,size,data,flags):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr,ctypes.c_void_p)
def glNamedBufferSubData(buffer,offset,size,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum)
def glNamedFramebufferDrawBuffer(framebuffer,buf):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray)
def glNamedFramebufferDrawBuffers(framebuffer,n,bufs):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint)
def glNamedFramebufferParameteri(framebuffer,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum)
def glNamedFramebufferReadBuffer(framebuffer,src):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLuint)
def glNamedFramebufferRenderbuffer(framebuffer,attachment,renderbuffertarget,renderbuffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glNamedFramebufferTexture(framebuffer,attachment,texture,level):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLint)
def glNamedFramebufferTextureLayer(framebuffer,attachment,texture,level,layer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLsizei,_cs.GLsizei)
def glNamedRenderbufferStorage(renderbuffer,internalformat,width,height):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei)
def glNamedRenderbufferStorageMultisample(renderbuffer,samples,internalformat,width,height):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,_cs.GLsizei,ctypes.c_void_p)
def glReadnPixels(x,y,width,height,format,type,bufSize,data):pass
@_f
@_p.types(None,)
def glTextureBarrier():pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint)
def glTextureBuffer(texture,internalformat,buffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr)
def glTextureBufferRange(texture,internalformat,buffer,offset,size):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glTextureParameterIiv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray)
def glTextureParameterIuiv(texture,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLfloat)
def glTextureParameterf(texture,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glTextureParameterfv(texture,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint)
def glTextureParameteri(texture,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glTextureParameteriv(texture,pname,param):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei)
def glTextureStorage1D(texture,levels,internalformat,width):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei)
def glTextureStorage2D(texture,levels,internalformat,width,height):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLboolean)
def glTextureStorage2DMultisample(texture,samples,internalformat,width,height,fixedsamplelocations):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei)
def glTextureStorage3D(texture,levels,internalformat,width,height,depth):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLboolean)
def glTextureStorage3DMultisample(texture,samples,internalformat,width,height,depth,fixedsamplelocations):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glTextureSubImage1D(texture,level,xoffset,width,format,type,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glTextureSubImage2D(texture,level,xoffset,yoffset,width,height,format,type,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glTextureSubImage3D(texture,level,xoffset,yoffset,zoffset,width,height,depth,format,type,pixels):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glTransformFeedbackBufferBase(xfb,index,buffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr)
def glTransformFeedbackBufferRange(xfb,index,buffer,offset,size):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glUnmapNamedBuffer(buffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glVertexArrayAttribBinding(vaobj,attribindex,bindingindex):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLboolean,_cs.GLuint)
def glVertexArrayAttribFormat(vaobj,attribindex,size,type,normalized,relativeoffset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLuint)
def glVertexArrayAttribIFormat(vaobj,attribindex,size,type,relativeoffset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLint,_cs.GLenum,_cs.GLuint)
def glVertexArrayAttribLFormat(vaobj,attribindex,size,type,relativeoffset):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glVertexArrayBindingDivisor(vaobj,bindingindex,divisor):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint)
def glVertexArrayElementBuffer(vaobj,buffer):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLintptr,_cs.GLsizei)
def glVertexArrayVertexBuffer(vaobj,bindingindex,buffer,offset,stride):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,ctypes.POINTER(_cs.GLintptr),arrays.GLsizeiArray)
def glVertexArrayVertexBuffers(vaobj,first,count,buffers,offsets,strides):pass
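# Example (a minimal sketch, not part of the generated bindings; assumes a
# current GL 4.5 context, NumPy, and GL_STATIC_DRAW imported from OpenGL.GL):
#   import numpy as np
#   buf = np.zeros(1, dtype=np.uint32)
#   glCreateBuffers(1, buf)                       # DSA: no bind needed
#   data = np.arange(4, dtype=np.float32)
#   glNamedBufferData(int(buf[0]), data.nbytes, data, GL_STATIC_DRAW)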
| 47.73747 | 153 | 0.815268 |
aced4d8cd5f497a325d3e137ad80410414c52cf1 | 30,083 | py | Python | quantecon/quad.py | TomWeiland/QuantEcon | 1389f5e5a8cb7ad426df60e60843ed24b257aab2 | ["BSD-3-Clause"] | 1 | 2019-03-19T13:55:49.000Z | 2019-03-19T13:55:49.000Z | quantecon/quad.py | TomWeiland/QuantEcon | 1389f5e5a8cb7ad426df60e60843ed24b257aab2 | ["BSD-3-Clause"] | null | null | null | quantecon/quad.py | TomWeiland/QuantEcon | 1389f5e5a8cb7ad426df60e60843ed24b257aab2 | ["BSD-3-Clause"] | 1 | 2019-01-20T13:45:13.000Z | 2019-01-20T13:45:13.000Z |
"""
Filename: quad.py
Authors: Chase Coleman, Spencer Lyon
Date: 2014-07-01
Defining various quadrature routines.
Based on the quadrature routines found in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
from __future__ import division
import math
import numpy as np
import scipy.linalg as la
from scipy.special import gammaln
import sympy as sym
from .ce_util import ckron, gridmake
__all__ = ['qnwcheb', 'qnwequi', 'qnwlege', 'qnwnorm', 'qnwlogn',
'qnwsimp', 'qnwtrap', 'qnwunif', 'quadrect', 'qnwbeta',
'qnwgamma']
# ------------------ #
# Exported Functions #
# ------------------ #
def qnwcheb(n, a=1, b=1):
"""
    Computes multivariate Gauss-Chebyshev quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwcheb`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwcheb1, n, a, b)
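# Example (a quick numerical check): integrating f(x) = x**2 over [0, 1]
# with Gauss-Chebyshev nodes should give roughly 1/3.
#   >>> nodes, weights = qnwcheb(15, 0, 1)
#   >>> weights.dot(nodes**2)           # approximately 0.3333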
def qnwequi(n, a, b, kind="N", equidist_pp=None):
"""
    Generates equidistributed sequences with the property that the
    average value of an integrable function evaluated over the sequence
    converges to the integral as n goes to infinity.
Parameters
----------
n : int
Number of sequence points
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default="N")
One of the following:
- N - Neiderreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
equidist_pp : array_like, optional(default=None)
        A sequence of irrational numbers (by default the square roots of
        the primes) used to generate the Weyl and Haber sequences
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwequi`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if equidist_pp is None:
equidist_pp = np.sqrt(np.array(list(sym.primerange(0, 7920))))
n, a, b = list(map(np.atleast_1d, list(map(np.asarray, [n, a, b]))))
d = max(list(map(len, [n, a, b])))
n = np.prod(n)
if a.size == 1:
a = np.repeat(a, d)
if b.size == 1:
b = np.repeat(b, d)
i = np.arange(1, n + 1)
if kind.upper() == "N": # Neiderreiter
j = 2.0 ** (np.arange(1, d+1) / (d+1))
nodes = np.outer(i, j)
nodes = (nodes - np.fix(nodes)).squeeze()
elif kind.upper() == "W": # Weyl
j = equidist_pp[:d]
nodes = np.outer(i, j)
nodes = (nodes - np.fix(nodes)).squeeze()
elif kind.upper() == "H": # Haber
j = equidist_pp[:d]
nodes = np.outer(i * (i+1) / 2, j)
nodes = (nodes - np.fix(nodes)).squeeze()
elif kind.upper() == "R": # pseudo-random
nodes = np.random.rand(n, d).squeeze()
else:
raise ValueError("Unknown sequence requested")
# compute nodes and weights
r = b - a
nodes = a + nodes * r
weights = (np.prod(r) / n) * np.ones(n)
return nodes, weights
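# Example (a sketch): a Weyl sequence on the unit square; the weighted sum of
# f(x, y) = x * y tends to the exact integral 1/4 as n grows.
#   >>> nodes, weights = qnwequi(1000, [0, 0], [1, 1], kind="W")
#   >>> weights.dot(nodes[:, 0] * nodes[:, 1])   # approximately 0.25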
def qnwlege(n, a, b):
"""
    Computes multivariate Gauss-Legendre quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwlege`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwlege1, n, a, b)
def qnwnorm(n, mu=None, sig2=None, usesqrtm=False):
"""
Computes nodes and weights for multivariate normal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwnorm`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
n = np.asarray(n)
d = n.size
if mu is None:
mu = np.zeros(d)
else:
mu = np.asarray(mu)
if sig2 is None:
sig2 = np.eye(d)
else:
sig2 = np.asarray(sig2).reshape(d, d)
if all([x.size == 1 for x in [n, mu, sig2]]):
nodes, weights = _qnwnorm1(n)
else:
nodes = []
weights = []
for i in range(d):
_1d = _qnwnorm1(n[i])
nodes.append(_1d[0])
weights.append(_1d[1])
nodes = gridmake(*nodes)
weights = ckron(*weights[::-1])
if usesqrtm:
new_sig2 = la.sqrtm(sig2)
else: # cholesky
new_sig2 = la.cholesky(sig2)
if d > 1:
nodes = nodes.dot(new_sig2) + mu # Broadcast ok
    else:  # nodes.dot(new_sig2) will not be aligned in the scalar case.
nodes = nodes * new_sig2 + mu
return nodes.squeeze(), weights
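# Example (sanity check): for the standard normal the rule reproduces the
# first two moments almost exactly.
#   >>> nodes, weights = qnwnorm(11)
#   >>> weights.dot(nodes), weights.dot(nodes**2)   # ~0.0 and ~1.0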
def qnwlogn(n, mu=None, sig2=None):
"""
Computes nodes and weights for multivariate lognormal distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
mu : scalar or array_like(float), optional(default=zeros(d))
The means of each dimension of the random variable. If a scalar
is given, that constant is repeated d times, where d is the
number of dimensions
sig2 : array_like(float), optional(default=eye(d))
A d x d array representing the variance-covariance matrix of the
multivariate normal distribution.
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwlogn`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes, weights = qnwnorm(n, mu, sig2)
return np.exp(nodes), weights
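# Example: the quadrature mean of a lognormal should match the analytic mean
# exp(mu + sig2/2).
#   >>> nodes, weights = qnwlogn(15, 0.0, 0.25)
#   >>> weights.dot(nodes)              # approximately exp(0.125)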
def qnwsimp(n, a, b):
"""
Computes multivariate Simpson quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwsimp`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwsimp1, n, a, b)
def qnwtrap(n, a, b):
"""
Computes multivariate trapezoid rule quadrature nodes and weights.
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwtrap`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwtrap1, n, a, b)
def qnwunif(n, a, b):
"""
Computes quadrature nodes and weights for multivariate uniform
distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwunif`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
n, a, b = list(map(np.asarray, [n, a, b]))
nodes, weights = qnwlege(n, a, b)
weights = weights / np.prod(b - a)
return nodes, weights
def quadrect(f, n, a, b, kind='lege', *args, **kwargs):
"""
Integrate the d-dimensional function f on a rectangle with lower and
upper bound for dimension i defined by a[i] and b[i], respectively;
using n[i] points.
Parameters
----------
f : function
The function to integrate over. This should be a function
that accepts as its first argument a matrix representing points
along each dimension (each dimension is a column). Other
arguments that need to be passed to the function are caught by
`*args` and `**kwargs`
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float)
A length-d iterable of lower endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
b : scalar or array_like(float)
A length-d iterable of upper endpoints. If a scalar is given,
that constant is repeated d times, where d is the number of
dimensions
kind : string, optional(default='lege')
Specifies which type of integration to perform. Valid
values are:
lege - Gauss-Legendre
cheb - Gauss-Chebyshev
trap - trapezoid rule
simp - Simpson rule
N - Neiderreiter equidistributed sequence
W - Weyl equidistributed sequence
H - Haber equidistributed sequence
R - Monte Carlo
*args, **kwargs :
Other arguments passed to the function f
Returns
-------
out : scalar (float)
The value of the integral on the region [a, b]
Notes
-----
    Based on the original function ``quadrect`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if kind.lower() == "lege":
nodes, weights = qnwlege(n, a, b)
elif kind.lower() == "cheb":
nodes, weights = qnwcheb(n, a, b)
elif kind.lower() == "trap":
nodes, weights = qnwtrap(n, a, b)
elif kind.lower() == "simp":
nodes, weights = qnwsimp(n, a, b)
else:
nodes, weights = qnwequi(n, a, b, kind)
out = weights.dot(f(nodes, *args, **kwargs))
return out
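# Example: integrate f(x) = x**2 over [0, 1] with the default Gauss-Legendre
# rule.
#   >>> quadrect(lambda x: x**2, 11, 0, 1)   # approximately 1/3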
def qnwbeta(n, a=1.0, b=1.0):
"""
Computes nodes and weights for beta distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
a : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of first shape parameters for the beta
        distribution. If a scalar is given, it is repeated d times
    b : scalar or array_like(float), optional(default=1.0)
        A length-d iterable of second shape parameters for the beta
        distribution. If a scalar is given, it is repeated d times
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwbeta`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwbeta1, n, a, b)
def qnwgamma(n, a=None):
"""
Computes nodes and weights for gamma distribution
Parameters
----------
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
    a : scalar or array_like(float), optional(default=None)
        A length-d iterable of shape parameters for the gamma
        distribution. If a scalar is given, it is repeated d times; if
        None, a shape parameter of 1 is used in each dimension
Returns
-------
nodes : np.ndarray(dtype=float)
Quadrature nodes
weights : np.ndarray(dtype=float)
Weights for quadrature nodes
Notes
-----
    Based on the original function ``qnwgamma`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return _make_multidim_func(_qnwgamma1, n, a)
# ------------------ #
# Internal Functions #
# ------------------ #
def _make_multidim_func(one_d_func, n, *args):
"""
A helper function to cut down on code repetition. Almost all of the
    code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing with
various forms of input arguments and then shelling out to the
corresponding 1d version of the function.
This routine does all the argument checking and passes things
through the appropriate 1d function before using a tensor product
to combine weights and nodes.
Parameters
----------
one_d_func : function
The 1d function to be called along each dimension
n : int or array_like(float)
A length-d iterable of the number of nodes in each dimension
args :
These are the arguments to various qnw____ functions. For the
majority of the functions this is just a and b, but some differ.
Returns
-------
    nodes : np.ndarray(dtype=float)
        Quadrature nodes
    weights : np.ndarray(dtype=float)
        Weights for quadrature nodes
"""
args = list(args)
n = np.asarray(n)
args = list(map(np.asarray, args))
if all([x.size == 1 for x in [n] + args]):
return one_d_func(n, *args)
d = n.size
for i in range(len(args)):
if args[i].size == 1:
args[i] = np.repeat(args[i], d)
nodes = []
weights = []
for i in range(d):
ai = [x[i] for x in args]
_1d = one_d_func(n[i], *ai)
nodes.append(_1d[0])
weights.append(_1d[1])
weights = ckron(*weights[::-1]) # reverse ordered tensor product
nodes = gridmake(*nodes)
return nodes, weights
def _qnwcheb1(n, a, b):
"""
    Compute univariate Gauss-Chebyshev quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on the original function ``qnwcheb1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
nodes = (b+a)/2 - (b-a)/2 * np.cos(np.pi/n * np.linspace(0.5, n-0.5, n))
# Create temporary arrays to be used in computing weights
t1 = np.arange(1, n+1) - 0.5
t2 = np.arange(0.0, n, 2)
t3 = np.concatenate([np.array([1.0]),
-2.0/(np.arange(1.0, n-1, 2)*np.arange(3.0, n+1, 2))])
# compute weights and return
weights = ((b-a)/n)*np.cos(np.pi/n*np.outer(t1, t2)).dot(t3)
return nodes, weights
def _qnwlege1(n, a, b):
"""
    Compute univariate Gauss-Legendre quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on the original function ``qnwlege1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
maxit = 100
m = np.fix((n + 1) / 2.0).astype(int)
xm = 0.5 * (b + a)
xl = 0.5 * (b - a)
nodes = np.zeros(n)
weights = nodes.copy()
i = np.arange(m, dtype='int')
z = np.cos(np.pi * ((i + 1.0) - 0.25) / (n + 0.5))
for its in range(maxit):
p1 = 1.0
p2 = 0.0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = ((2 * j - 1) * z * p2 - (j - 1) * p3) / j
pp = n * (z * p1 - p2)/(z * z - 1.0)
z1 = z.copy()
z = z1 - p1/pp
if all(np.abs(z - z1) < 1e-14):
break
if its == maxit - 1:
raise ValueError("Maximum iterations in _qnwlege1")
nodes[i] = xm - xl * z
nodes[- i - 1] = xm + xl * z
weights[i] = 2 * xl / ((1 - z * z) * pp * pp)
weights[- i - 1] = weights[i]
return nodes, weights
def _qnwnorm1(n):
"""
Compute nodes and weights for quadrature of univariate standard
normal distribution
Parameters
----------
n : int
The number of nodes
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on the original function ``qnwnorm1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
maxit = 100
pim4 = 1 / np.pi**(0.25)
m = np.fix((n + 1) / 2).astype(int)
nodes = np.zeros(n)
weights = np.zeros(n)
for i in range(m):
if i == 0:
z = np.sqrt(2*n+1) - 1.85575 * ((2 * n + 1)**(-1 / 6.1))
elif i == 1:
z = z - 1.14 * (n ** 0.426) / z
elif i == 2:
z = 1.86 * z + 0.86 * nodes[0]
elif i == 3:
z = 1.91 * z + 0.91 * nodes[1]
else:
z = 2 * z + nodes[i-2]
its = 0
while its < maxit:
its += 1
p1 = pim4
p2 = 0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = z * math.sqrt(2.0/j) * p2 - math.sqrt((j - 1.0) / j) * p3
pp = math.sqrt(2 * n) * p2
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-14:
break
if its == maxit:
raise ValueError("Failed to converge in _qnwnorm1")
nodes[n - 1 - i] = z
nodes[i] = -z
weights[i] = 2 / (pp*pp)
weights[n - 1 - i] = weights[i]
weights /= math.sqrt(math.pi)
nodes = nodes * math.sqrt(2.0)
return nodes, weights
def _qnwsimp1(n, a, b):
"""
Compute univariate Simpson quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on the original function ``qnwsimp1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n % 2 == 0:
print("WARNING qnwsimp: n must be an odd integer. Increasing by 1")
n += 1
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = np.tile([2.0, 4.0], (n + 1) // 2)
weights = weights[:n]
weights[0] = weights[-1] = 1
weights = (dx / 3.0) * weights
return nodes, weights
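# Example: Simpson's rule needs an odd number of nodes, so an even n is
# bumped up by one.
#   >>> nodes, weights = _qnwsimp1(4, 0, 1)   # warns, then uses n = 5
#   >>> len(nodes)                            # 5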
def _qnwtrap1(n, a, b):
"""
Compute univariate trapezoid rule quadrature nodes and weights
Parameters
----------
n : int
The number of nodes
a : int
The lower endpoint
b : int
The upper endpoint
Returns
-------
nodes : np.ndarray(dtype=float)
An n element array of nodes
    weights : np.ndarray(dtype=float)
An n element array of weights
Notes
-----
    Based on the original function ``qnwtrap1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if n < 1:
raise ValueError("n must be at least one")
nodes = np.linspace(a, b, n)
dx = nodes[1] - nodes[0]
weights = dx * np.ones(n)
weights[0] *= 0.5
weights[-1] *= 0.5
return nodes, weights
def _qnwbeta1(n, a=1.0, b=1.0):
"""
Computes nodes and weights for quadrature on the beta distribution.
Default is a=b=1 which is just a uniform distribution
NOTE: For now I am just following compecon; would be much better to
find a different way since I don't know what they are doing.
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float, optional(default=1)
First Beta distribution parameter
b : scalar : float, optional(default=1)
Second Beta distribution parameter
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on the original function ``_qnwbeta1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
# We subtract one and write a + 1 where we actually want a, and a
# where we want a - 1
a = a - 1
b = b - 1
maxiter = 25
# Allocate empty space
nodes = np.zeros(n)
weights = np.zeros(n)
# Find "reasonable" starting values. Why these numbers?
for i in range(n):
if i == 0:
an = a/n
bn = b/n
r1 = (1+a) * (2.78/(4+n*n) + .768*an/n)
r2 = 1 + 1.48*an + .96*bn + .452*an*an + .83*an*bn
z = 1 - r1/r2
elif i == 1:
r1 = (4.1+a) / ((1+a)*(1+0.156*a))
r2 = 1 + 0.06 * (n-8) * (1+0.12*a)/n
r3 = 1 + 0.012*b * (1+0.25*abs(a))/n
z = z - (1-z) * r1 * r2 * r3
elif i == 2:
r1 = (1.67+0.28*a)/(1+0.37*a)
r2 = 1+0.22*(n-8)/n
r3 = 1+8*b/((6.28+b)*n*n)
z = z-(nodes[0]-z)*r1*r2*r3
elif i == n - 2:
r1 = (1+0.235*b)/(0.766+0.119*b)
r2 = 1/(1+0.639*(n-4)/(1+0.71*(n-4)))
r3 = 1/(1+20*a/((7.5+a)*n*n))
z = z+(z-nodes[-4])*r1*r2*r3
elif i == n - 1:
r1 = (1+0.37*b) / (1.67+0.28*b)
r2 = 1 / (1+0.22*(n-8)/n)
r3 = 1 / (1+8*a/((6.28+a)*n*n))
z = z+(z-nodes[-3])*r1*r2*r3
else:
z = 3*nodes[i-1] - 3*nodes[i-2] + nodes[i-3]
ab = a+b
# Root finding
its = 0
z1 = -100
while abs(z - z1) > 1e-10 and its < maxiter:
temp = 2 + ab
p1 = (a-b + temp*z)/2
p2 = 1
for j in range(2, n+1):
p3 = p2
p2 = p1
temp = 2*j + ab
aa = 2*j * (j+ab)*(temp-2)
bb = (temp-1) * (a*a - b*b + temp*(temp-2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb*p2 - c*p3)/aa
pp = (n*(a-b-temp*z) * p1 + 2*(n+a)*(n+b)*p2)/(temp*(1 - z*z))
z1 = z
z = z1 - p1/pp
if abs(z - z1) < 1e-12:
break
its += 1
if its == maxiter:
raise ValueError("Max Iteration reached. Failed to converge")
nodes[i] = z
weights[i] = temp/(pp*p2)
nodes = (1-nodes)/2
weights = weights * math.exp(gammaln(a+n) + gammaln(b+n)
- gammaln(n+1) - gammaln(n+ab+1))
weights = weights / (2*math.exp(gammaln(a+1) + gammaln(b+1)
- gammaln(ab+2)))
return nodes, weights
def _qnwgamma1(n, a=None):
"""
    Computes nodes and weights for quadrature on the gamma
    distribution. The default ``a=None`` corresponds to a shape
    parameter of 1
NOTE: For now I am just following compecon; would be much better to
find a different way since I don't know what they are doing.
Parameters
----------
n : scalar : int
The number of quadrature points
a : scalar : float
Gamma distribution parameter
Returns
-------
nodes : np.ndarray(dtype=float, ndim=1)
The quadrature points
weights : np.ndarray(dtype=float, ndim=1)
The quadrature weights that correspond to nodes
Notes
-----
    Based on the original function ``qnwgamma1`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
if a is None:
a = 0
else:
a -= 1
maxit = 10
factor = -math.exp(gammaln(a+n) - gammaln(n) - gammaln(a+1))
nodes = np.zeros(n)
weights = np.zeros(n)
# Create nodes
for i in range(n):
# Reasonable starting values
if i == 0:
z = (1+a) * (3+0.92*a) / (1 + 2.4*n + 1.8*a)
elif i == 1:
z = z + (15 + 6.25*a) / (1 + 0.9*a + 2.5*n)
else:
j = i-1
z = z + ((1 + 2.55*j) / (1.9*j) + 1.26*j*a / (1 + 3.5*j)) * \
(z - nodes[j-1]) / (1 + 0.3*a)
# root finding iterations
its = 0
z1 = -10000
while abs(z - z1) > 1e-10 and its < maxit:
p1 = 1.0
p2 = 0.0
for j in range(1, n+1):
p3 = p2
p2 = p1
p1 = ((2*j - 1 + a - z)*p2 - (j - 1 + a)*p3) / j
pp = (n*p1 - (n+a)*p2) / z
z1 = z
z = z1 - p1/pp
its += 1
if its == maxit:
raise ValueError('Failure to converge')
nodes[i] = z
weights[i] = factor / (pp*n*p2)
return nodes, weights
| 25.822318 | 78 | 0.569059 |
aced4e4d12f1ed508f8d519d0387158b5abbe356 | 2,179 | py | Python | code/super_minitaur/script/utils.py | buenos-dan/quadrupedal_robot | 605054c027e20b83e347f2aa175c03c965e72983 | ["MIT"] | 5 | 2019-03-22T06:39:42.000Z | 2021-07-27T13:56:45.000Z | code/super_minitaur/script/utils.py | buenos-dan/quadrupedal_robot | 605054c027e20b83e347f2aa175c03c965e72983 | ["MIT"] | null | null | null | code/super_minitaur/script/utils.py | buenos-dan/quadrupedal_robot | 605054c027e20b83e347f2aa175c03c965e72983 | ["MIT"] | 2 | 2021-02-16T09:52:04.000Z | 2021-11-30T12:12:55.000Z |
#!/usr/bin/env python
# coding:utf-8
import rospy
import numpy as np
from math import *
from settings import *
def TorqueCal(f_x,f_y,angle_x,angle_y):
'''
    :param f_x: foot-end force along x (N)
    :param f_y: foot-end force along y (N)
    :param angle_x: front motor angle (rad)
    :param angle_y: rear motor angle (rad)
    :return: [front motor current,
              rear motor current]
'''
force = np.mat([[f_x],
[0],
[f_y]])
jaco = Jacobian3(angle_x, angle_y)
torque = jaco.T * force
cur = TorqueToCur(torque)
return [float(cur[0][0]),float(cur[2][0])]
def Jacobian3(alpha, beta):
"""
    3x3 Jacobian of the leg mechanism
    :param alpha, beta: motor angles (rad)
    :return: 3x3 Jacobian matrix
"""
l1 = LEG_LENGTH[0]
l2 = LEG_LENGTH[1]
dl2 = DELTA_L2
theta1 = (alpha+beta)/2
theta2 = (beta-alpha)/2
Q = sqrt(pow(l2, 2)-pow(l1*cos(theta1), 2))
A = 1.0/2 * (sin(theta2) * (l1 * cos(theta1) + pow(l1, 2) * cos(theta1) * sin(theta1)/Q) + dl2 * cos(theta2 + asin(l1/l2*cos(theta1))) * (-(l1/l2 * sin(theta1))/sqrt(1-pow((l1/l2*cos(theta1)), 2))))
B = 1.0/2 * (cos(theta2)*(l1*sin(theta1)+Q) + dl2 * cos(theta2 + asin(l1/l2*cos(theta1))))
C = -1.0/2*(cos(theta2)*(l1*cos(theta1)+pow(l1, 2)*cos(theta1)*sin(theta1)/Q)+dl2 * sin(theta2 + asin(l1/l2*cos(theta1))) * (-(l1/l2 * sin(theta1))/sqrt(1-pow((l1/l2*cos(theta1)), 2))))
D = 1.0/2 * (sin(theta2)*(l1*sin(theta1)+Q) + dl2 * sin(theta2 + asin(l1/l2*cos(theta1))))
jaco = [[A-B, 0., A+B],
[0, 0., 0.],
[C+D, 0., C-D]]
jaco = np.mat(jaco)
return jaco
def mat_Q(act_pos_xf, act_pos_zf, act_pos_xb, act_pos_zb):
xf = 0.23+act_pos_xf
yf = 0.142
zf = act_pos_zf
xb = -0.23+act_pos_xb
yb = -0.142
zb = act_pos_zb
mat_Q = [[ 1, 0, 0, 1, 0, 0],
[ 0, 0, 1, 0, 0, 1],
[ 0, -zf, yf, 0, -zb, yb],
[ zf, 0, -xf, zb, 0, -xb],
[-yf, xf, 0, -yb, xb, 0],
[ 0, 1, 0, 0, -1, 0]
]
mat_Q = np.mat(mat_Q)
return mat_Q.I
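# Example (a sketch; the stance wrench below is an illustration, not from the
# original code): distribute a desired body wrench onto the two stance feet.
#   import numpy as np
#   wrench = np.mat([[0.], [0.], [50.], [0.], [0.], [0.]])
#   foot_forces = mat_Q(0.0, -0.25, 0.0, -0.25) * wrench
#   # foot_forces stacks [fx, fy, fz] for the front foot over the back foot,
#   # with the wrench components ordered to match the rows of Q above.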
def CurToTorque(cur):
return 6.5/(5*13.3)*cur
def TorqueToCur(torque):
return (13.3*5)/6.5*torque
def createPub(name, msgType):
    """Create and return a ROS publisher for the given topic name and message type."""
    return rospy.Publisher(name, msgType, queue_size=10)
if __name__ == '__main__':
# test
cur = TorqueCal(0,-50,-0/180.0*pi,-0/180.0*pi)
print(cur)
| 25.045977 | 199 | 0.575952 |
aced4ebd266c073826c73c95362a822c2d6b40cb | 105 | py | Python | table2json/management/__init__.py | amoghmadan/Table2JSON | ffddc51f0f3f80244eb38cdc0e32d88fef73d395 | ["MIT"] | 4 | 2020-09-01T04:14:13.000Z | 2020-11-20T15:41:41.000Z | table2json/management/__init__.py | amoghmadan/Table2JSON | ffddc51f0f3f80244eb38cdc0e32d88fef73d395 | ["MIT"] | 2 | 2021-12-22T07:22:15.000Z | 2021-12-24T07:40:20.000Z | table2json/management/__init__.py | amoghmadan/Table2JSON | ffddc51f0f3f80244eb38cdc0e32d88fef73d395 | ["MIT"] | null | null | null |
from table2json.management.base import Table2JSONBaseCommand
__all__ = [
"Table2JSONBaseCommand",
]
| 17.5 | 60 | 0.790476 |
aced4f9ebec04dacf0a0c0fd9d2ae83c898deca8 | 2,323 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/stats_graph_error.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | ["MIT"] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/stats_graph_error.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/stats_graph_error.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class StatsGraphError(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.StatsGraph`.
Details:
- Layer: ``126``
- ID: ``0xbedc9822``
Parameters:
error: ``str``
See Also:
This object can be returned by 1 method:
.. hlist::
:columns: 2
- :obj:`stats.LoadAsyncGraph <pyrogram.raw.functions.stats.LoadAsyncGraph>`
"""
__slots__: List[str] = ["error"]
ID = 0xbedc9822
QUALNAME = "types.StatsGraphError"
def __init__(self, *, error: str) -> None:
self.error = error # string
@staticmethod
def read(data: BytesIO, *args: Any) -> "StatsGraphError":
# No flags
error = String.read(data)
return StatsGraphError(error=error)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(String(self.error))
return data.getvalue()
| 30.168831 | 103 | 0.616875 |
aced5040fb706be9d4cc39038df13f058b105d74 | 3,390 | py | Python | tests/test_msautotest.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | 1 | 2018-08-08T06:48:05.000Z | 2018-08-08T06:48:05.000Z | tests/test_msautotest.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | null | null | null | tests/test_msautotest.py | tomkralidis/mappyfile | 4313f9e52f5c54198988e62c3e2ebc9223f174ef | [
"MIT"
] | null | null | null | """
Parse all the test Mapfiles in msautotests, write them to a new file,
and then test that these also parse correctly
"""
import os
import logging
import glob
import shutil
import json
from mappyfile.pprint import PrettyPrinter
from mappyfile.parser import Parser
from mappyfile.transformer import MapfileToDict
from mappyfile.validator import Validator
import mappyfile
def create_copy(msautotest_fld, msautotest_copy):
# first make a backup copy of msautotest
if os.path.isdir(msautotest_copy):
shutil.rmtree(msautotest_copy)
logging.info("Removing %s...", msautotest_copy)
logging.info("Copying %s to %s...", msautotest_fld, msautotest_copy)
shutil.copytree(msautotest_fld, msautotest_copy)
logging.info("Copying complete!")
return msautotest_copy
def parse_mapfile(parser, transformer, pp, fn):
logging.debug("Parsing %s", fn)
try:
ast = parser.parse_file(fn)
except Exception as ex:
logging.warning("%s could not be successfully parsed", fn)
logging.exception(ex)
raise
try:
d = transformer.transform(ast)
except Exception as ex:
logging.warning("%s could not be successfully transformed", fn)
logging.exception(ex)
raise
return d
def main(msautotest_fld, create_new_copy=True):
msautotest_copy = os.path.join(os.path.dirname(msautotest_fld), "msautotest_mappyfile")
if create_new_copy:
create_copy(msautotest_fld, msautotest_copy)
parser = Parser()
transformer = MapfileToDict()
pp = PrettyPrinter()
# these two maps aren't in utf8
# see https://github.com/mapserver/mapserver/pull/5460
# ignore_list = ["wms_inspire_scenario1.map","wms_inspire_scenario2.map"]
# transparent_layer.map has an extra END, see https://github.com/mapserver/mapserver/pull/5468
# polyline_no_clip.map needs symbol names in quotes, and SYMBOL is ambiguous
ignore_list = ["polyline_no_clip.map",
"poly-label-multiline-pos-auto.map", "poly-label-pos-auto.map",
"embed_sb_rgba.map", "embed_sb_rgba_offset.map"] # has attributes all on the same line
    mapfiles = glob.glob(msautotest_fld + '/**/*.map', recursive=True)  # recursive=True so '**' matches nested dirs
mapfiles = [f for f in mapfiles if os.path.basename(f) not in ignore_list]
# target_map = "polyline_no_clip.map"
# mapfiles = [f for f in mapfiles if os.path.basename(f) in (target_map)]
v = Validator()
for fn in mapfiles:
d = parse_mapfile(parser, transformer, pp, fn)
errors = v.validate(d, add_comments=True)
if errors:
logging.warning("{} failed validation".format(fn))
output_file = fn.replace(msautotest_fld, msautotest_copy)
try:
mappyfile.utils.write(d, output_file)
except Exception:
logging.warning(json.dumps(d, indent=4))
logging.warning("%s could not be successfully re-written", fn)
raise
# now try reading it again
print(json.dumps(d, indent=4))
d = parse_mapfile(parser, transformer, pp, output_file)
errors = v.validate(d, add_comments=True)
if errors:
logging.warning("{} failed validation".format(fn))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
fld = r"C:\Temp\mapserver\msautotest"
main(fld, False)
print("Done!")
| 30.267857 | 106 | 0.677581 |
aced50ff097b5285087116bb5d4bc1475d8d5c10 | 9,713 | py | Python | theia/grid.py | rafraser/theia | cdf8c49e7f09ebbfa59c9710d417fefa5c1450cc | [
"MIT"
] | 3 | 2020-12-07T14:58:40.000Z | 2021-04-09T09:00:45.000Z | theia/grid.py | rafraser/theia | cdf8c49e7f09ebbfa59c9710d417fefa5c1450cc | [
"MIT"
] | 6 | 2021-04-29T07:35:43.000Z | 2022-03-28T03:09:35.000Z | theia/grid.py | rafraser/theia | cdf8c49e7f09ebbfa59c9710d417fefa5c1450cc | [
"MIT"
] | null | null | null | import argparse
import math
from typing import Callable
from PIL import Image, ImageDraw
from random import randrange, random, choice, shuffle
# Typings
Point = tuple[int, int]
Row = list[Point]
Grid = list[Row]
def build(size: int, num: int) -> Grid:
"""Build a basic square grid
Args:
size (int): Size (width or height) of the grid
num (int): Number of points in each row and column
Returns:
Grid: Simple grid with equally spaced points
"""
step = size / (num - 1)
return [[(round(step * xn), round(step * yn)) for xn in range(num)] for yn in range(num)]
def build_radial(size: int, num_angular: int, num_radius: int, offset: int = 0, center: bool = True) -> Grid:
"""Build a radial grid
Args:
size (int): Size (width or height) of the grid
num_angular (int): Number of points in each 'circle'
num_radius (int): Number of 'circles'
offset (int, optional): Angular offset, in degrees. Defaults to 0.
center (bool, optional): Add an extra point at the center of the radial grid?. Defaults to True.
Returns:
Grid: Radial grid with equally spaced points
"""
rad_step = (size // 2) / num_radius
ang_step = (2 * math.pi) / num_angular
ang_off = math.radians(offset)
xx = size // 2
yy = size // 2
def rad_to_cart(ang, rad):
return (xx + round(rad * math.cos(ang)), yy + round(rad * math.sin(ang)))
grid = [
[rad_to_cart(ang_off + (ang_step * an), rad_step * (rn + 1)) for an in range(num_angular)]
for rn in range(num_radius)
]
if center:
grid.insert(0, [(xx, yy)])
return grid
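def _example_build_radial():  # added illustrative sketch, not part of the original API
    """Hedged demo: 512px grid, 8 points per ring, 3 rings, so rad_step = 256 / 3.
    With offset=0 each ring's first point lies on the +x axis, e.g. ring 1 at
    (256 + round(256 / 3), 256) == (341, 256)."""
    return build_radial(512, num_angular=8, num_radius=3, offset=0, center=True)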
def jitter(
grid: Grid,
min_variance: int = None,
max_variance: int = None,
size: int = None,
clamp: bool = False,
variance_list: list[int] = None,
) -> Grid:
"""Randomly jitter all points in a grid
    Jitter will apply to both the x and y axes of the grid
    If a variance list is given, each point will be jittered by a random value from the variance list
    If only one of min_variance or max_variance is specified, points will be jittered from -v to v
    If both min_variance and max_variance are specified, points will be jittered from -max to -min or min to max
Args:
grid (Grid): Grid points to jitter
min_variance (int, optional): Minimum jitter amount. Defaults to None.
max_variance (int, optional): Maximum jitter amount. Defaults to None.
size (int, optional): Grid size - useful for clamping. Defaults to None.
clamp (bool, optional): Whether to stop points leaving the bounds. Defaults to False.
variance_list (list[int], optional): List of possible jitter amounts. Defaults to None.
Returns:
Grid: Transformed grid, with each point 'jittered'
"""
    # If no size is specified, fall back to the largest x-coordinate in the
    # first row; note this estimate can drift if the grid was already jittered
if size is None:
size = max(grid[0], key=lambda x: x[0])[0]
    # Argument handling - there are a few cases
# This jit function is then applied to each point to spice em up
if variance_list is not None and len(variance_list) > 0:
def jit(val):
return val + choice(variance_list)
elif min_variance is None and max_variance is None:
def jit(val):
return val
elif min_variance is None and max_variance is not None:
def jit(val):
return val + choice([-1, 1]) * randrange(0, max_variance)
elif max_variance is None and min_variance is not None:
def jit(val):
return val + choice([-1, 1]) * randrange(0, min_variance)
    elif min_variance >= max_variance:
        def jit(val):
            return val + choice([-1, 1]) * min_variance
    else:
        # both bounds given with min < max: jitter magnitude drawn from [min, max)
        def jit(val):
            return val + choice([-1, 1]) * randrange(min_variance, max_variance)
def clampf(x):
# Clamp a point 0 <= x <= size *only* if the clamp flag is enabled
if clamp:
return max(0, min(x, size))
else:
return x
# Jit (and optionally clamp) all points in the grid
return [[(clampf(jit(xx)), clampf(jit(yy))) for (xx, yy) in row] for row in grid]
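def _example_jitter():  # added illustrative sketch, not part of the original API
    """Hedged demo: jitter an 8x8, 512px grid by up to roughly +/-10px, clamped to bounds."""
    return jitter(build(512, 8), max_variance=10, size=512, clamp=True)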
def shift_rows(grid: Grid, offset: int, mod: int = 2, size: int = None, clamp: bool = False) -> Grid:
"""Shift Nth rows of a grid by a fixed amount
Args:
grid (Grid): Grid to shift rows of
        offset (int): How much to shift each row by
mod (int, optional): Shift every X rows. Defaults to 2.
size (int, optional): Size of the grid - used if clamping is enabled. Defaults to None.
clamp (bool, optional): Whether to remove points outside the bounds. Defaults to False.
Returns:
Grid: Transformed grid, with shifted rows
"""
result_grid = []
for row_index, row in enumerate(grid):
        if row_index % mod == 0:
            shifted = [(xx + offset, yy) for (xx, yy) in row]
            if clamp and size is not None:
                # drop shifted points that fall outside [0, size]
                shifted = [p for p in shifted if 0 <= p[0] <= size]
            result_grid.append(shifted)
else:
result_grid.append(row)
return result_grid
def shift_columns(grid: Grid, offset: int, mod: int = 2, size: int = None, clamp: bool = False) -> Grid:
"""Shift Nth columns of a grid by a fixed amount
Args:
grid (Grid): Grid to shift columns of
offset (int): How much to shift each column by
mod (int, optional): Shift every X columns. Defaults to 2.
size (int, optional): Size of the grid - used if clamping is enabled. Defaults to None.
clamp (bool, optional): Whether to remove points outside the bounds. Defaults to False.
Returns:
Grid: Transformed grid, with shifted columns
"""
result_grid = []
for row in grid:
new_row = []
        for col_index, (xx, yy) in enumerate(row):
            if col_index % mod == 0:
                y_off = yy + offset
                if clamp and size is not None and not (0 <= y_off <= size):
                    continue  # drop shifted points that leave the bounds
                new_row.append((xx, y_off))
            else:
                new_row.append((xx, yy))
result_grid.append(new_row)
return result_grid
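def _example_brick_pattern():  # added illustrative sketch, not part of the original API
    """Hedged demo: a brick-like layout; the cell step is 512 / 8 = 64px, so
    shifting every second row by 32px offsets it half a cell."""
    return shift_rows(build(512, 9), offset=32, mod=2, size=512, clamp=True)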
def triangle(grid: Grid, step: int = 1, symmetric: bool = True) -> Grid:
"""Turn a rectangular grid into a triangular grid
If symmetric is enabled, an isometric triangle will be made
If symmetric is not enabled, a right angle triangle will be made
Args:
grid (Grid): Grid to adjust
step (int, optional): How many points to remove from each 'level'. Defaults to 1.
symmetric (bool, optional): Should points be removed from both sides of the row?. Defaults to True.
Returns:
Grid: Transformed grid
"""
def triangle_row(row, shift):
if shift >= 1:
ss = shift * step
return row[ss:-ss] if symmetric else row[ss:]
else:
return row
return [[p for p in triangle_row(row, idx)] for idx, row in enumerate(grid)]
def sparsify(grid: Grid, percentage: float) -> Grid:
"""Drop a certain percentage of points randomly
This function keeps exactly the given percentage
for a faster, approximate method use fast_sparsify
Args:
grid (Grid): Grid to drop points from
percentage (float): Percentage of points to keep
Returns:
Grid: Transformed grid
"""
# Determine which points to keep
row_size = len(grid[0])
point_indexes = [
col_index + (row_index * row_size) for row_index, row in enumerate(grid) for col_index, _ in enumerate(row)
]
keep = round(len(point_indexes) * percentage)
shuffle(point_indexes)
    points_to_keep = set(point_indexes[:keep])  # set gives O(1) membership checks below
return [
[p for col_index, p in enumerate(row) if (col_index + (row_index * row_size)) in points_to_keep]
for row_index, row in enumerate(grid)
]
def fast_sparsify(grid: Grid, percentage: float) -> Grid:
"""Drop an approximate percentage of points randomly
This function randomly evaluates each point - for an exact percentage, use sparsify
Args:
grid (Grid): Grid to drop points from
percentage (float): Percentage chance of keeping each point
Returns:
Grid: Transformed grid
"""
return [[p for p in row if random() < percentage] for row in grid]
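def _example_sparsify():  # added illustrative sketch, not part of the original API
    """Hedged demo: sparsify() keeps exactly half of the 64 points (32 of them),
    while fast_sparsify() keeps roughly half, varying from run to run."""
    grid = build(512, 8)
    return sparsify(grid, 0.5), fast_sparsify(grid, 0.5)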
def flatten(grid: Grid) -> list[Point]:
"""Flatten a grid into a single list of points
Args:
grid (Grid): Grid to flatten
Returns:
list[Point]: List of (x, y) coordinates
"""
return [p for row in grid for p in row]
def transpose(grid: Grid) -> Grid:
"""Transpose (swap rows and columns) a grid
Args:
        grid (Grid): Grid to transpose
Returns:
Grid: Transposed grid
"""
swapped_grid = [[(yy, xx) for (xx, yy) in row] for row in grid]
return list(map(list, zip(*swapped_grid)))
def apply(grid: Grid, func: Callable[[Point], Point]) -> Grid:
"""Apply a pointwise function to all points of a grid
Args:
grid (Grid): Grid to apply function to
func (callable): Pointwise transformation function
Returns:
Grid: Transformed grid
"""
return [[func(p) for p in row] for row in grid]
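def _example_apply_scale():  # added illustrative sketch, not part of the original API
    """Hedged demo: a pointwise transform shrinking every point toward the origin."""
    return apply(build(512, 8), lambda p: (p[0] // 2, p[1] // 2))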
def visualise(grid: Grid, size: int, padding: int):
"""Helper function to visualise a grid
Not intended for external use
"""
img = Image.new("RGB", (size + padding * 2, size + padding * 2), color="#2d3436")
draw = ImageDraw.Draw(img)
def dot(x, y):
r = 4
draw.ellipse((x - r, y - r, x + r, y + r), fill="#a29bfe")
for (xx, yy) in flatten(grid):
dot(xx + padding, yy + padding)
img.save("output/grid_test.png")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("size", type=int)
parser.add_argument("num", type=int)
args = parser.parse_args()
grid = build(args.size, args.num)
visualise(grid, 512, 16)
| 31.638436 | 115 | 0.627407 |
aced5190383a01ca0b58966c4c4628e6037e6e52 | 661 | py | Python | var/spack/repos/builtin/packages/r-dichromat/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/r-dichromat/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/r-dichromat/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDichromat(RPackage):
"""Collapse red-green or green-blue distinctions to simulate the effects of
different types of color-blindness."""
homepage = "https://cran.r-project.org/web/packages/dichromat/index.html"
url = "https://cran.r-project.org/src/contrib/dichromat_2.0-0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/dichromat"
version('2.0-0', '84e194ac95a69763d740947a7ee346a6')
| 36.722222 | 79 | 0.736762 |
aced5252a9bf3d6f9b42e4c6e68f01007f0fad72 | 3,708 | py | Python | examples/onnx/resnet50v2/test.py | CristiFati/rknn-toolkit | a345d99be30c1ca84f56dd68a213c740450eeaa5 | [
"BSD-3-Clause"
] | null | null | null | examples/onnx/resnet50v2/test.py | CristiFati/rknn-toolkit | a345d99be30c1ca84f56dd68a213c740450eeaa5 | [
"BSD-3-Clause"
] | null | null | null | examples/onnx/resnet50v2/test.py | CristiFati/rknn-toolkit | a345d99be30c1ca84f56dd68a213c740450eeaa5 | [
"BSD-3-Clause"
] | null | null | null | import os
import urllib.request  # urlretrieve lives in urllib.request on Python 3
import traceback
import time
import sys
import numpy as np
import cv2
from rknn.api import RKNN
ONNX_MODEL = 'resnet50v2.onnx'
RKNN_MODEL = 'resnet50v2.rknn'
def show_outputs(outputs):
output = outputs[0][0]
output_sorted = sorted(output, reverse=True)
top5_str = 'resnet50v2\n-----TOP 5-----\n'
for i in range(5):
value = output_sorted[i]
index = np.where(output == value)
for j in range(len(index)):
if (i + j) >= 5:
break
if value > 0:
topi = '{}: {}\n'.format(index[j], value)
else:
topi = '-1: 0.0\n'
top5_str += topi
print(top5_str)
def readable_speed(speed):
speed_bytes = float(speed)
speed_kbytes = speed_bytes / 1024
if speed_kbytes > 1024:
speed_mbytes = speed_kbytes / 1024
if speed_mbytes > 1024:
speed_gbytes = speed_mbytes / 1024
return "{:.2f} GB/s".format(speed_gbytes)
else:
return "{:.2f} MB/s".format(speed_mbytes)
else:
return "{:.2f} KB/s".format(speed_kbytes)
def show_progress(blocknum, blocksize, totalsize):
speed = (blocknum * blocksize) / (time.time() - start_time)
speed_str = " Speed: {}".format(readable_speed(speed))
recv_size = blocknum * blocksize
f = sys.stdout
progress = (recv_size / totalsize)
progress_str = "{:.2f}%".format(progress * 100)
n = round(progress * 50)
s = ('#' * n).ljust(50, '-')
f.write(progress_str.ljust(8, ' ') + '[' + s + ']' + speed_str)
f.flush()
f.write('\r\n')
if __name__ == '__main__':
# Create RKNN object
rknn = RKNN()
# If resnet50v2 does not exist, download it.
# Download address:
# https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.onnx
if not os.path.exists(ONNX_MODEL):
print('--> Download {}'.format(ONNX_MODEL))
url = 'https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.onnx'
download_file = ONNX_MODEL
try:
start_time = time.time()
urllib.request.urlretrieve(url, download_file, show_progress)
except:
print('Download {} failed.'.format(download_file))
print(traceback.format_exc())
exit(-1)
print('done')
# pre-process config
print('--> Config model')
rknn.config(mean_values=[[123.675, 116.28, 103.53]], std_values=[[58.82, 58.82, 58.82]], reorder_channel='0 1 2')
print('done')
# Load ONNX model
print('--> Loading model')
ret = rknn.load_onnx(model=ONNX_MODEL)
if ret != 0:
print('Load resnet50v2 failed!')
exit(ret)
print('done')
# Build model
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset='./dataset.txt')
if ret != 0:
print('Build resnet50v2 failed!')
exit(ret)
print('done')
# Export RKNN model
print('--> Export RKNN model')
ret = rknn.export_rknn(RKNN_MODEL)
if ret != 0:
print('Export resnet50v2.rknn failed!')
exit(ret)
print('done')
# Set inputs
img = cv2.imread('./dog_224x224.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# init runtime environment
print('--> Init runtime environment')
ret = rknn.init_runtime()
if ret != 0:
print('Init runtime environment failed')
exit(ret)
print('done')
# Inference
print('--> Running model')
outputs = rknn.inference(inputs=[img])
x = outputs[0]
output = np.exp(x)/np.sum(np.exp(x))
outputs = [output]
show_outputs(outputs)
print('done')
rknn.release()
| 27.671642 | 117 | 0.589266 |
aced539f61fe6d7f3b031e3563ab531dea042b11 | 1,062 | py | Python | setup.py | 4Catalyzer/tqp | 94bcb66ea24fec7d280b23bd5182633bf5438aec | [
"MIT"
] | 3 | 2018-04-02T15:13:45.000Z | 2020-07-02T01:34:37.000Z | setup.py | 4Catalyzer/tqp | 94bcb66ea24fec7d280b23bd5182633bf5438aec | [
"MIT"
] | 17 | 2018-04-20T04:02:29.000Z | 2020-11-18T21:57:35.000Z | setup.py | 4Catalyzer/tqp | 94bcb66ea24fec7d280b23bd5182633bf5438aec | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="tqp",
version="0.5.1",
description="An opinionated library for pub/sub over SQS and SNS",
url="https://github.com/4Catalyzer/tqp",
author="Giacomo Tagliabue",
author_email="giacomo@gmail.com",
license="MIT",
python_requires=">=3.6",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
keywords="pub sub pubsub flask",
packages=find_packages(),
install_requires=("boto3",),
extras_require={
"dev": [
"pytest",
"fourmat~=0.4.3",
"pre-commit",
"moto[server]",
"boto3",
]
},
)
| 29.5 | 70 | 0.560264 |
aced54efbf1e0523dbb098525ab65a52d900f912 | 18,187 | py | Python | scapy/main.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | scapy/main.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | null | null | null | scapy/main.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 1 | 2018-11-15T12:37:04.000Z | 2018-11-15T12:37:04.000Z | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Main module for interactive startup.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, os, getopt, re, code
import gzip, glob
import importlib
import logging
from random import choice
import types
import io
# Never add any global import, in main.py, that would trigger a warning message
# before the console handler gets added in interact()
from scapy.error import log_interactive, log_loading, log_scapy, warning
import scapy.modules.six as six
from scapy.consts import WINDOWS  # referenced in interact(); make the dependency explicit
from scapy.themes import DefaultTheme, BlackAndWhite, apply_ipython_style
IGNORED = list(six.moves.builtins.__dict__)
GLOBKEYS = []
LAYER_ALIASES = {
"tls": "tls.all"
}
QUOTES = [
("Craft packets like it is your last day on earth.", "Lao-Tze"),
("Craft packets like I craft my beer.", "Jean De Clerck"),
("Craft packets before they craft you.", "Socrate"),
("Craft me if you can.", "IPv6 layer"),
("To craft a packet, you have to be a packet, and learn how to swim in the "
"wires and in the waves.", "Jean-Claude Van Damme"),
("We are in France, we say Skappee. OK? Merci.", "Sebastien Chabal"),
]
def _probe_config_file(cf):
cf_path = os.path.join(os.path.expanduser("~"), cf)
try:
os.stat(cf_path)
except OSError:
return None
else:
return cf_path
def _read_config_file(cf, _globals=globals(), _locals=locals(), interactive=True):
"""Read a config file: execute a python file while loading scapy, that may contain
some pre-configured values.
If _globals or _locals are specified, they will be updated with the loaded vars.
This allows an external program to use the function. Otherwise, vars are only available
from inside the scapy console.
params:
- _globals: the globals() vars
- _locals: the locals() vars
    - interactive: specifies whether errors should be logged via the Scapy
      console or raised.
ex, content of a config.py file:
'conf.verb = 42\n'
Manual loading:
>>> _read_config_file("./config.py"))
>>> conf.verb
42
"""
log_loading.debug("Loading config file [%s]", cf)
try:
exec(compile(open(cf).read(), cf, 'exec'), _globals, _locals)
except IOError as e:
if interactive:
raise
log_loading.warning("Cannot read config file [%s] [%s]", cf, e)
except Exception as e:
if interactive:
raise
log_loading.exception("Error during evaluation of config file [%s]", cf)
def _validate_local(x):
"""Returns whether or not a variable should be imported.
Will return False for any default modules (sys), or if
they are detected as private vars (starting with a _)"""
global IGNORED
return x[0] != "_" and not x in IGNORED
DEFAULT_PRESTART_FILE = _probe_config_file(".scapy_prestart.py")
DEFAULT_STARTUP_FILE = _probe_config_file(".scapy_startup.py")
SESSION = None
def _usage():
print("""Usage: scapy.py [-s sessionfile] [-c new_startup_file] [-p new_prestart_file] [-C] [-P]
-C: do not read startup file
-P: do not read pre-startup file""")
sys.exit(0)
######################
## Extension system ##
######################
def _load(module, globals_dict=None, symb_list=None):
"""Loads a Python module to make variables, objects and functions
available globally.
The idea is to load the module using importlib, then copy the
symbols to the global symbol table.
"""
if globals_dict is None:
globals_dict = six.moves.builtins.__dict__
try:
mod = importlib.import_module(module)
if '__all__' in mod.__dict__:
# import listed symbols
for name in mod.__dict__['__all__']:
if symb_list is not None:
symb_list.append(name)
globals_dict[name] = mod.__dict__[name]
else:
# only import non-private symbols
for name, sym in six.iteritems(mod.__dict__):
if _validate_local(name):
if symb_list is not None:
symb_list.append(name)
globals_dict[name] = sym
except Exception:
log_interactive.error("Loading module %s", module, exc_info=True)
def load_module(name):
"""Loads a Scapy module to make variables, objects and functions
available globally.
"""
_load("scapy.modules."+name)
def load_layer(name, globals_dict=None, symb_list=None):
"""Loads a Scapy layer module to make variables, objects and functions
available globally.
"""
_load("scapy.layers." + LAYER_ALIASES.get(name, name),
globals_dict=globals_dict, symb_list=symb_list)
def load_contrib(name):
"""Loads a Scapy contrib module to make variables, objects and
functions available globally.
If no contrib module can be found with the given name, try to find
a layer module, since a contrib module may become a layer module.
"""
try:
importlib.import_module("scapy.contrib." + name)
_load("scapy.contrib." + name)
except ImportError:
# if layer not found in contrib, try in layers
load_layer(name)
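# Added usage sketch (module names are examples; availability depends on the install):
#   load_layer("tls")     # resolved through LAYER_ALIASES to scapy.layers.tls.all
#   load_contrib("ospf")  # falls back to load_layer() if no contrib module matches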
def list_contrib(name=None):
if name is None:
name="*.py"
elif "*" not in name and "?" not in name and not name.endswith(".py"):
name += ".py"
name = os.path.join(os.path.dirname(__file__), "contrib", name)
for f in sorted(glob.glob(name)):
mod = os.path.basename(f)
if mod.startswith("__"):
continue
if mod.endswith(".py"):
mod = mod[:-3]
desc = { "description":"-", "status":"?", "name":mod }
for l in io.open(f, errors="replace"):
p = l.find("scapy.contrib.")
if p >= 0:
p += 14
q = l.find("=", p)
key = l[p:q].strip()
value = l[q+1:].strip()
desc[key] = value
print("%(name)-20s: %(description)-40s status=%(status)s" % desc)
##############################
## Session saving/restoring ##
##############################
def update_ipython_session(session):
"""Updates IPython session with a custom one"""
try:
get_ipython().user_ns.update(session)
except:
pass
def save_session(fname=None, session=None, pickleProto=-1):
"""Save current Scapy session to the file specified in the fname arg.
params:
- fname: file to save the scapy session in
- session: scapy session to use. If None, the console one will be used
- pickleProto: pickle proto version (default: -1 = latest)"""
from scapy import utils
if fname is None:
fname = conf.session
if not fname:
conf.session = fname = utils.get_temp_file(keep=True)
log_interactive.info("Use [%s] as session file" % fname)
if session is None:
try:
session = get_ipython().user_ns
except:
session = six.moves.builtins.__dict__["scapy_session"]
to_be_saved = session.copy()
if "__builtins__" in to_be_saved:
del(to_be_saved["__builtins__"])
for k in list(to_be_saved):
i = to_be_saved[k]
if hasattr(i, "__module__") and (k[0] == "_" or i.__module__.startswith("IPython")):
del(to_be_saved[k])
if isinstance(i, ConfClass):
del(to_be_saved[k])
        elif isinstance(i, (type, types.ModuleType)):
if k[0] != "_":
log_interactive.error("[%s] (%s) can't be saved.", k, type(to_be_saved[k]))
del(to_be_saved[k])
try:
os.rename(fname, fname+".bak")
except OSError:
pass
f=gzip.open(fname,"wb")
six.moves.cPickle.dump(to_be_saved, f, pickleProto)
f.close()
del f
def load_session(fname=None):
"""Load current Scapy session from the file specified in the fname arg.
This will erase any existing session.
params:
- fname: file to load the scapy session from"""
if fname is None:
fname = conf.session
try:
s = six.moves.cPickle.load(gzip.open(fname,"rb"))
except IOError:
try:
s = six.moves.cPickle.load(open(fname,"rb"))
except IOError:
# Raise "No such file exception"
raise
scapy_session = six.moves.builtins.__dict__["scapy_session"]
scapy_session.clear()
scapy_session.update(s)
update_ipython_session(scapy_session)
log_loading.info("Loaded session [%s]" % fname)
def update_session(fname=None):
"""Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from"""
if fname is None:
fname = conf.session
try:
s = six.moves.cPickle.load(gzip.open(fname,"rb"))
except IOError:
s = six.moves.cPickle.load(open(fname,"rb"))
scapy_session = six.moves.builtins.__dict__["scapy_session"]
scapy_session.update(s)
update_ipython_session(scapy_session)
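# Added usage sketch for the session helpers above (the path is an example):
#   save_session("/tmp/scapy.session")    # pickle the interactive namespace
#   load_session("/tmp/scapy.session")    # replace the current session
#   update_session("/tmp/scapy.session")  # merge into the current session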
def init_session(session_name, mydict=None):
global SESSION
global GLOBKEYS
scapy_builtins = {k: v for k, v in six.iteritems(importlib.import_module(".all", "scapy").__dict__) if _validate_local(k)}
six.moves.builtins.__dict__.update(scapy_builtins)
GLOBKEYS.extend(scapy_builtins)
GLOBKEYS.append("scapy_session")
scapy_builtins=None # XXX replace with "with" statement
if session_name:
try:
os.stat(session_name)
except OSError:
log_loading.info("New session [%s]" % session_name)
else:
try:
try:
SESSION = six.moves.cPickle.load(gzip.open(session_name,"rb"))
except IOError:
SESSION = six.moves.cPickle.load(open(session_name,"rb"))
log_loading.info("Using session [%s]" % session_name)
except EOFError:
log_loading.error("Error opening session [%s]" % session_name)
except AttributeError:
log_loading.error("Error opening session [%s]. Attribute missing" % session_name)
if SESSION:
if "conf" in SESSION:
conf.configure(SESSION["conf"])
conf.session = session_name
SESSION["conf"] = conf
else:
conf.session = session_name
else:
conf.session = session_name
SESSION = {"conf":conf}
else:
SESSION = {"conf": conf}
six.moves.builtins.__dict__["scapy_session"] = SESSION
if mydict is not None:
six.moves.builtins.__dict__["scapy_session"].update(mydict)
update_ipython_session(mydict)
GLOBKEYS.extend(mydict)
################
##### Main #####
################
def scapy_delete_temp_files():
for f in conf.temp_files:
try:
os.unlink(f)
except:
pass
del(conf.temp_files[:])
def _prepare_quote(quote, author, max_len=78):
"""This function processes a quote and returns a string that is ready
to be used in the fancy prompt.
"""
quote = quote.split(' ')
max_len -= 6
lines = []
cur_line = []
def _len(line):
return sum(len(elt) for elt in line) + len(line) - 1
while quote:
if not cur_line or (_len(cur_line) + len(quote[0]) - 1 <= max_len):
cur_line.append(quote.pop(0))
continue
lines.append(' | %s' % ' '.join(cur_line))
cur_line = []
if cur_line:
lines.append(' | %s' % ' '.join(cur_line))
cur_line = []
lines.append(' | %s-- %s' % (" " * (max_len - len(author) - 5), author))
return lines
def interact(mydict=None,argv=None,mybanner=None,loglevel=20):
global SESSION
global GLOBKEYS
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_scapy.addHandler(console_handler)
from scapy.config import conf
conf.color_theme = DefaultTheme()
conf.interactive = True
if loglevel is not None:
conf.logLevel = loglevel
STARTUP_FILE = DEFAULT_STARTUP_FILE
PRESTART_FILE = DEFAULT_PRESTART_FILE
session_name = None
if argv is None:
argv = sys.argv
try:
opts = getopt.getopt(argv[1:], "hs:Cc:Pp:d")
for opt, parm in opts[0]:
if opt == "-h":
_usage()
elif opt == "-s":
session_name = parm
elif opt == "-c":
STARTUP_FILE = parm
elif opt == "-C":
STARTUP_FILE = None
elif opt == "-p":
PRESTART_FILE = parm
elif opt == "-P":
PRESTART_FILE = None
elif opt == "-d":
conf.logLevel = max(1, conf.logLevel-10)
if len(opts[1]) > 0:
raise getopt.GetoptError("Too many parameters : [%s]" % " ".join(opts[1]))
except getopt.GetoptError as msg:
log_loading.error(msg)
sys.exit(1)
init_session(session_name, mydict)
if STARTUP_FILE:
_read_config_file(STARTUP_FILE, interactive=True)
if PRESTART_FILE:
_read_config_file(PRESTART_FILE, interactive=True)
if not conf.interactive_shell or conf.interactive_shell.lower() in [
"ipython", "auto"
]:
try:
import IPython
from IPython import start_ipython
except ImportError:
log_loading.warning(
"IPython not available. Using standard Python shell "
"instead.\nAutoCompletion, History are disabled."
)
if WINDOWS:
log_loading.warning("IPyton not available. On Windows, colors are disabled")
conf.color_theme = BlackAndWhite()
IPYTHON = False
else:
IPYTHON = True
else:
IPYTHON = False
if conf.fancy_prompt:
the_logo = [
" ",
" aSPY//YASa ",
" apyyyyCY//////////YCa ",
" sY//////YSpcs scpCY//Pp ",
" ayp ayyyyyyySCP//Pp syY//C ",
" AYAsAYYYYYYYY///Ps cY//S",
" pCCCCY//p cSSps y//Y",
" SPPPP///a pP///AC//Y",
" A//A cyP////C",
" p///Ac sC///a",
" P////YCpc A//A",
" scccccp///pSP///p p//Y",
" sY/////////y caa S//P",
" cayCyayP//Ya pY/Ya",
" sY/PsY////YCc aC//Yp ",
" sc sccaCY//PCypaapyCP//YSs ",
" spCPY//////YPSps ",
" ccaacs ",
" ",
]
the_banner = [
"",
"",
" |",
" | Welcome to Scapy",
" | Version %s" % conf.version,
" |",
" | https://github.com/secdev/scapy",
" |",
" | Have fun!",
" |",
]
quote, author = choice(QUOTES)
the_banner.extend(_prepare_quote(quote, author, max_len=39))
the_banner.append(" |")
the_banner = "\n".join(
logo + banner for logo, banner in six.moves.zip_longest(
(conf.color_theme.logo(line) for line in the_logo),
(conf.color_theme.success(line) for line in the_banner),
fillvalue=""
)
)
else:
the_banner = "Welcome to Scapy (%s)" % conf.version
if mybanner is not None:
the_banner += "\n"
the_banner += mybanner
if IPYTHON:
banner = the_banner + " using IPython %s\n" % IPython.__version__
try:
from traitlets.config.loader import Config
except ImportError:
log_loading.warning(
"traitlets not available. Some Scapy shell features won't be "
"available."
)
try:
start_ipython(
display_banner=False,
user_ns=SESSION,
exec_lines=["print(\"\"\"" + banner + "\"\"\")"]
)
except:
code.interact(banner = the_banner, local=SESSION)
else:
cfg = Config()
try:
get_ipython
except NameError:
# Set "classic" prompt style when launched from run_scapy(.bat) files
# Register and apply scapy color+prompt style
apply_ipython_style(shell=cfg.TerminalInteractiveShell)
cfg.TerminalInteractiveShell.confirm_exit = False
cfg.TerminalInteractiveShell.separate_in = u''
            if int(IPython.__version__.split(".")[0]) >= 6:
cfg.TerminalInteractiveShell.term_title_format = "Scapy v" + conf.version
else:
cfg.TerminalInteractiveShell.term_title = False
cfg.HistoryAccessor.hist_file = conf.histfile
cfg.InteractiveShell.banner1 = banner
            # additional IPython configuration can be specified on cfg here
try:
start_ipython(config=cfg, user_ns=SESSION)
except (AttributeError, TypeError):
code.interact(banner = the_banner, local=SESSION)
else:
code.interact(banner = the_banner, local=SESSION)
if conf.session:
save_session(conf.session, SESSION)
for k in GLOBKEYS:
try:
del(six.moves.builtins.__dict__[k])
except:
pass
if __name__ == "__main__":
interact()
| 32.887884 | 126 | 0.566778 |
aced5745274d88a6103f2724310a6fbccac43d88 | 2,652 | py | Python | emojis_used_pie.py | Wokiri/Emojis_in_Whatsapp_Chat_Text | abfef922eabc93a0cd23a349c070855d913337dc | [
"MIT"
] | null | null | null | emojis_used_pie.py | Wokiri/Emojis_in_Whatsapp_Chat_Text | abfef922eabc93a0cd23a349c070855d913337dc | [
"MIT"
] | null | null | null | emojis_used_pie.py | Wokiri/Emojis_in_Whatsapp_Chat_Text | abfef922eabc93a0cd23a349c070855d913337dc | [
"MIT"
] | null | null | null | from pathlib import Path
import collections, re, datetime
from string import printable
from random import sample
import pandas as pd
from bokeh.plotting import figure
from bokeh.transform import cumsum
from bokeh.models import ColumnDataSource
from bokeh.palettes import BrBG10
from bokeh.io import output_file, show
from math import pi
chats = Path(r'./WhatsApp_Chat.txt')
alpha_nums = printable + '''
”“’• —‘
♂
♀
ā
☁
¯
ツ
¯
'ê'
'''
data_json = {}
emojis_used = []
start_date = None
end_date = None
date_pattern = re.compile(r'^\d{1,2}/\d{1,2}/\d{1,2}, \d{1,2}:\d{1,2}')
with open(chats, mode='r') as file_reader:
data = file_reader.read()
for item in data:
if item not in alpha_nums:
emojis_used.append(item)
with open(chats, mode='r') as file_reader:
all_lines = file_reader.readlines()
start_date = date_pattern.findall(all_lines[0])[0].replace(',', '')
end_date = date_pattern.findall(all_lines[len(all_lines)-1])[0].replace(',', '')
start_date = datetime.datetime.strptime(start_date, '%m/%d/%y %H:%M')
end_date = datetime.datetime.strptime(end_date, '%m/%d/%y %H:%M')
data_json = dict(collections.Counter(emojis_used))
data_json = dict(sorted(data_json.items(), reverse=True, key=lambda item: item[1]))
# columns = list(data_json.keys())
# keep only the ten most frequent emojis (data_json is already sorted by count)
data_json = dict(list(data_json.items())[:10])
emojis_used_DF = pd.Series(data_json).reset_index(name='occurences').rename(columns={'index':'emoji'})
emojis_used_DF['angle'] = emojis_used_DF['occurences']/emojis_used_DF['occurences'].sum() * 2*pi
# emojis_used_DF['color'] = inferno(len(data_json))
emojis_used_DF['color'] = BrBG10
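# Added note: 'angle' is each wedge's size in radians, occurrences/total * 2*pi;
# e.g. 25 of 100 total occurrences gives 0.25 * 2*pi, roughly 1.571 rad. BrBG10
# supplies exactly ten colors, so this assumes ten emojis survived the top-10 cut.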
emojis_used_DF = emojis_used_DF.sort_values(
by=['occurences'],
ascending = False
)
output_file(filename = "emojis_used_pie.html", title='Piechart | Number of Emojis')
emojis_used_CDS = ColumnDataSource(emojis_used_DF)
emojis_used_fig = figure(
title=f"Top 10 Emojis Used since {start_date} to {end_date}",
plot_height=600,
plot_width=800,
tooltips=[
('Emoji', '@emoji'),
('Occurrences', '@occurences'),
],
)
emojis_used_fig.wedge(
x=0,
y=1,
radius=0.6,
start_angle=cumsum('angle', include_zero=True),
end_angle=cumsum('angle'),
line_color="#ffffff",
fill_color='color',
source=emojis_used_CDS,
legend_field='emoji',
)
emojis_used_fig.toolbar.active_drag = None
emojis_used_fig.title.align = "center"
emojis_used_fig.title.text_color = "darkgreen"
emojis_used_fig.title.text_font_size = "18px"
show(emojis_used_fig) | 21.216 | 102 | 0.687406 |
aced5798b9cb4572686606a553037d395a7984cf | 90 | py | Python | aoc20211217b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
] | null | null | null | aoc20211217b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
] | null | null | null | aoc20211217b.py | BarnabyShearer/aoc | 4feb66c668b068f0f42ad99b916e80732eba5a2d | [
"MIT"
] | null | null | null | from aoc20211217a import *
def aoc(data):
return sum(1 for _ in hits(*parse(data)))
| 15 | 45 | 0.677778 |
aced579ac80ad2462d87acbe9dae3c2143f63b01 | 3,815 | py | Python | reader/joint_reader.py | 0YuanZhang0/PALM | 00f02c784d5f2123652e81fe7abb632d3cfa05bf | [
"Apache-2.0"
] | null | null | null | reader/joint_reader.py | 0YuanZhang0/PALM | 00f02c784d5f2123652e81fe7abb632d3cfa05bf | [
"Apache-2.0"
] | null | null | null | reader/joint_reader.py | 0YuanZhang0/PALM | 00f02c784d5f2123652e81fe7abb632d3cfa05bf | [
"Apache-2.0"
] | null | null | null | #encoding=utf8
import os
import sys
import random
import numpy as np
import paddle
import paddle.fluid as fluid
from utils.placeholder import Placeholder
def repeat(reader):
"""Repeat a generator forever"""
generator = reader()
while True:
try:
yield next(generator)
except StopIteration:
generator = reader()
yield next(generator)
def create_joint_generator(input_shape, generators, task_map_id, is_multi_task=True):
def empty_output(input_shape, batch_size=1):
results = []
for i in range(len(input_shape)):
if input_shape[i][1] == 'int32':
dtype = np.int32
if input_shape[i][1] == 'int64':
dtype = np.int64
if input_shape[i][1] == 'float32':
dtype = np.float32
if input_shape[i][1] == 'float64':
dtype = np.float64
shape = input_shape[i][0]
shape[0] = batch_size
pad_tensor = np.zeros(shape=shape, dtype=dtype)
results.append(pad_tensor)
return results
def wrapper():
generators_inst = [repeat(gen[0]) for gen in generators]
generators_ratio = [gen[1] for gen in generators]
weights = [ratio/sum(generators_ratio) for ratio in generators_ratio]
task_names = [gen[2] for gen in generators]
task_names_ids = [0]
for i in range(1, len(task_names)):
if task_names[i] == task_names[i - 1]:
task_names_ids.append(task_names_ids[-1])
else:
task_names_ids.append(task_names_ids[-1] + 1)
run_task_id = range(len(generators))
while True:
idx = np.random.choice(run_task_id, p=weights)
gen_results = next(generators_inst[idx])
if not gen_results:
break
batch_size = gen_results[0].shape[0]
results = empty_output(input_shape, batch_size)
task_id_tensor = np.array([[task_names_ids[idx]]]).astype("int64")
results[0] = task_id_tensor
backbone_range_start = task_map_id[0][0]
backbone_range_end = task_map_id[0][1]
for i in range(backbone_range_start, backbone_range_end):
results[i] = gen_results[i - 1]
            # continue consuming gen_results where the backbone loop left off;
            # `i` still holds the last index reached by the loop above
            cur_gene_task = task_names_ids[idx] + 1
            for j in range(task_map_id[cur_gene_task][0], task_map_id[cur_gene_task][1]):
                results[j] = gen_results[i]
                i += 1
yield results
return wrapper
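# Added note: each entry of `generators` is assumed to be a 3-tuple
# (reader_fn, mix_ratio, task_name), e.g.
#   gens = [(reader_a, 1.0, "mrc"), (reader_b, 0.5, "mrc"), (reader_c, 2.0, "match")]
# ratios are normalized into sampling weights, and consecutive entries that share
# a task_name are folded onto the same task id.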
def create_reader(reader_name, input_shape, is_multi_task, task_map_id, *gens):
"""
build reader for multi_task_learning
"""
placeholder = Placeholder(input_shape)
pyreader, model_inputs = placeholder.build(capacity=16, reader_name=reader_name)
joint_generator = create_joint_generator(input_shape, gens[0], task_map_id, is_multi_task=is_multi_task)
return joint_generator, pyreader, model_inputs
def joint_input_shape(input_shape_list):
"""
joint main task and auxiliary tasks input shape
"""
joint_test_input_shape = input_shape_list[0][1]["backbone"] + input_shape_list[0][1]["task"]
joint_train_input_shape = [([1, 1], 'int64')] # task_id_shape
backbone_input_shape = input_shape_list[0][0]["backbone"]
joint_train_input_shape.extend(backbone_input_shape)
task_map_id = [(1, len(input_shape_list[0][0]["backbone"]) + 1)]
for input_shape in input_shape_list:
task_input_shape = input_shape[0]["task"]
joint_train_input_shape.extend(task_input_shape)
task_map_id.append((task_map_id[-1][1], task_map_id[-1][1] + len(task_input_shape)))
return joint_train_input_shape, joint_test_input_shape, task_map_id
| 35.324074 | 108 | 0.629096 |
aced586ae9d45fa0d2a89d8e29a0bd648c30f86d | 5,151 | py | Python | roles/lib_openshift/src/class/oc_storageclass.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
] | 164 | 2015-07-29T17:35:04.000Z | 2021-12-16T16:38:04.000Z | roles/lib_openshift/src/class/oc_storageclass.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
] | 3,634 | 2015-06-09T13:49:15.000Z | 2022-03-23T20:55:44.000Z | roles/lib_openshift/src/class/oc_storageclass.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
] | 250 | 2015-06-08T19:53:11.000Z | 2022-03-01T04:51:23.000Z | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCStorageClass(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'storageclass'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
verbose=False):
''' Constructor for OCStorageClass '''
super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.storage_class = None
def exists(self):
''' return whether a storageclass exists'''
if self.storage_class:
return True
return False
def get(self):
'''return storageclass '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.storage_class = StorageClass(content=result['results'][0])
elif '\"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
# parameters are currently unable to be updated. need to delete and recreate
self.delete()
# pause here and attempt to wait for delete.
# Better option would be to poll
import time
time.sleep(5)
return self.create()
def needs_update(self):
''' verify an update is needed '''
# check if params have updated
if self.storage_class.get_parameters() != self.config.parameters:
return True
for anno_key, anno_value in self.storage_class.get_annotations().items():
if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
return True
return False
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
rconfig = StorageClassConfig(params['name'],
provisioner="kubernetes.io/{}".format(params['provisioner']),
parameters=params['parameters'],
annotations=params['annotations'],
api_version="storage.k8s.io/{}".format(params['api_version']),
default_storage_class=params.get('default_storage_class', 'false'),
kubeconfig=params['kubeconfig'],
)
oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sc.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_sc.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = oc_sc.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_sc.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a create.'}
# Create it here
api_rval = oc_sc.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
########
# Update
########
if oc_sc.needs_update():
api_rval = oc_sc.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sc.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
return {'changed': False, 'results': api_rval, 'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
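# Illustrative sketch (added, not part of the original module): run_ansible expects
# an Ansible-style params dict; a minimal hypothetical invocation could look like:
#   OCStorageClass.run_ansible({'name': 'fast', 'provisioner': 'gce-pd',
#                               'parameters': {'type': 'pd-ssd'}, 'annotations': {},
#                               'api_version': 'v1', 'default_storage_class': 'false',
#                               'kubeconfig': '/etc/origin/master/admin.kubeconfig',
#                               'state': 'present', 'debug': False}, check_mode=False)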
| 33.019231 | 104 | 0.519899 |
aced58a56dfce3bf73fb1c32c46068f468a1d49a | 17,791 | py | Python | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | TheMichaelHu/python-aiplatform | e03f373a7e44c354eda88875a41c771f6d7e3ce1 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | TheMichaelHu/python-aiplatform | e03f373a7e44c354eda88875a41c771f6d7e3ce1 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | TheMichaelHu/python-aiplatform | e03f373a7e44c354eda88875a41c771f6d7e3ce1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
"""gRPC backend transport for SpecialistPoolService.
A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
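    # Added note: create_channel is usually invoked indirectly; constructing the
    # transport (or the SpecialistPoolServiceClient) without an explicit channel
    # builds one from application-default credentials, e.g. (illustrative only):
    #   transport = SpecialistPoolServiceGrpcTransport(host="aiplatform.googleapis.com")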
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the create specialist pool method over gRPC.
Creates a SpecialistPool.
Returns:
Callable[[~.CreateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_specialist_pool" not in self._stubs:
self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool",
request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_specialist_pool"]
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
specialist_pool.SpecialistPool,
]:
r"""Return a callable for the get specialist pool method over gRPC.
Gets a SpecialistPool.
Returns:
Callable[[~.GetSpecialistPoolRequest],
~.SpecialistPool]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_specialist_pool" not in self._stubs:
self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool",
request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
response_deserializer=specialist_pool.SpecialistPool.deserialize,
)
return self._stubs["get_specialist_pool"]
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
specialist_pool_service.ListSpecialistPoolsResponse,
]:
r"""Return a callable for the list specialist pools method over gRPC.
Lists SpecialistPools in a Location.
Returns:
Callable[[~.ListSpecialistPoolsRequest],
~.ListSpecialistPoolsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_specialist_pools" not in self._stubs:
self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools",
request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
)
return self._stubs["list_specialist_pools"]
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete specialist pool method over gRPC.
Deletes a SpecialistPool as well as all Specialists
in the pool.
Returns:
Callable[[~.DeleteSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_specialist_pool" not in self._stubs:
self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool",
request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_specialist_pool"]
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest], operations_pb2.Operation
]:
r"""Return a callable for the update specialist pool method over gRPC.
Updates a SpecialistPool.
Returns:
Callable[[~.UpdateSpecialistPoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_specialist_pool" not in self._stubs:
self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool",
request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_specialist_pool"]
def close(self):
self.grpc_channel.close()
@property
def kind(self) -> str:
return "grpc"
__all__ = ("SpecialistPoolServiceGrpcTransport",)
| 43.928395 | 102 | 0.64808 |
aced5abfc9db9789fe3ac18882e2f4050bb282eb | 1,659 | py | Python | samples/snippets/create_key_version.py | anukaal/python-kms | e28126d7ab1b2b44ee54c6a4a1ddc4d5c15a57b6 | [
"Apache-2.0"
] | 24 | 2020-07-07T03:17:32.000Z | 2022-03-30T14:48:01.000Z | samples/snippets/create_key_version.py | anukaal/python-kms | e28126d7ab1b2b44ee54c6a4a1ddc4d5c15a57b6 | [
"Apache-2.0"
] | 90 | 2020-02-05T22:20:20.000Z | 2022-03-30T22:42:11.000Z | samples/snippets/create_key_version.py | anukaal/python-kms | e28126d7ab1b2b44ee54c6a4a1ddc4d5c15a57b6 | [
"Apache-2.0"
] | 31 | 2020-02-08T13:51:41.000Z | 2022-03-22T01:08:04.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START kms_create_key_version]
def create_key_version(project_id, location_id, key_ring_id, key_id):
"""
Creates a new version of the given key.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key for which to create a new version (e.g. 'my-key').
Returns:
CryptoKeyVersion: Cloud KMS key version.
"""
# Import the client library.
from google.cloud import kms
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the parent key name.
key_name = client.crypto_key_path(project_id, location_id, key_ring_id, key_id)
# Build the key version.
version = {}
# Call the API.
created_version = client.create_crypto_key_version(request={'parent': key_name, 'crypto_key_version': version})
print('Created key version: {}'.format(created_version.name))
return created_version
# [END kms_create_key_version]
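# A minimal usage sketch (hypothetical resource IDs; assumes the key ring and
# key already exist and application-default credentials are configured):
# create_key_version('my-project', 'us-east1', 'my-key-ring', 'my-key')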
| 34.5625 | 115 | 0.707052 |
aced5b94bdbe538453776911bc6f3fb65b9174d2 | 225 | py | Python | publications/PrADA/experiments/income_census/tsne_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 49 | 2020-11-04T03:15:59.000Z | 2022-03-23T12:21:15.000Z | publications/PrADA/experiments/income_census/tsne_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 2 | 2021-09-12T02:36:42.000Z | 2021-11-25T13:19:58.000Z | publications/PrADA/experiments/income_census/tsne_config.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 11 | 2020-11-11T12:14:49.000Z | 2022-03-08T16:17:05.000Z | tsne_embedding_creation = {
"tsne_embedding_data_dir": "YOUR_ORIGINAL_DATA_DIR/tsne_emb/",
"tsne_graph_output_dir": "YOUR_GRAPH_OUTPUT_DATA_DIR/output/",
"apply_adaptation": True,
"using_interaction": False
}
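# A minimal access sketch (hypothetical; the directory values above are
# placeholders and must be replaced before running any experiment):
# emb_dir = tsne_embedding_creation["tsne_embedding_data_dir"]
# use_da = tsne_embedding_creation["apply_adaptation"]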
| 32.142857 | 66 | 0.76 |
aced5bdd94dd3be6c8e086940eb107d1a04efa33 | 2,198 | py | Python | instapy/comment_util.py | lbgrand/RoboGram | 85c977ffc1ab30a930e5232aeb400eaf45cd946f | [
"MIT"
] | 2 | 2018-12-27T20:03:20.000Z | 2020-10-23T07:56:17.000Z | instapy/comment_util.py | lbgrand/RoboGram | 85c977ffc1ab30a930e5232aeb400eaf45cd946f | [
"MIT"
] | null | null | null | instapy/comment_util.py | lbgrand/RoboGram | 85c977ffc1ab30a930e5232aeb400eaf45cd946f | [
"MIT"
] | 1 | 2019-09-24T19:37:11.000Z | 2019-09-24T19:37:11.000Z | # -*- coding: utf-8 -*-
"""Module which handles the commenting features"""
from random import choice
from .time_util import sleep
from selenium.common.exceptions import WebDriverException
import emoji
def get_comment_input(browser):
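    # Instagram renders the comment box either as a <textarea> or as an
    # <input> depending on the page layout, so try both selectors in turn.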
comment_input = browser.find_elements_by_xpath(
'//textarea[@placeholder = "Add a comment…"]')
if len(comment_input) <= 0:
comment_input = browser.find_elements_by_xpath(
'//input[@placeholder = "Add a comment…"]')
return comment_input
def open_comment_section(browser):
missing_comment_elem_warning = (
'--> Warning: Comment Button Not Found:'
' May cause issues with browser windows of smaller widths')
comment_elem = browser.find_elements_by_xpath(
"//a[@role='button']/span[text()='Comment']/..")
if len(comment_elem) > 0:
try:
browser.execute_script(
"arguments[0].click();", comment_elem[0])
except WebDriverException:
print(missing_comment_elem_warning)
return []
else:
print(missing_comment_elem_warning)
return []
def comment_image(browser, comments):
"""Checks if it should comment on the image"""
rand_comment = (choice(comments))
rand_comment = emoji.demojize(rand_comment)
rand_comment = emoji.emojize(rand_comment, use_aliases=True)
sleep(2)
open_comment_section(browser)
comment_input = get_comment_input(browser)
if len(comment_input) > 0:
comment_input[0].clear()
comment_input = get_comment_input(browser)
browser.execute_script(
"arguments[0].value = '" + rand_comment + " ';", comment_input[0])
# An extra space is added here and then deleted.
# This forces the input box to update the reactJS core
comment_input[0].send_keys("\b")
comment_input = get_comment_input(browser)
comment_input[0].submit()
else:
print('--> Warning: Comment Action Likely Failed:'
' Comment Element not found')
print('--> Not commented')
return 0
print("--> Commented: {}".format(rand_comment.encode('utf-8')))
sleep(3)
return 1
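# A minimal usage sketch (hypothetical; assumes `browser` is a logged-in
# Selenium WebDriver instance already on a post page):
# comment_image(browser, [u'Great shot! :camera:', u'Love this :heart:'])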
| 33.815385 | 78 | 0.649682 |
aced5c3441a4d1c414c2747b297ed91c27308010 | 20,975 | py | Python | statsmodels/sandbox/panel/mixed.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 15 | 2015-03-03T09:47:42.000Z | 2022-01-05T18:28:31.000Z | statsmodels/sandbox/panel/mixed.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 7 | 2015-11-20T08:33:04.000Z | 2020-07-24T19:34:39.000Z | statsmodels/sandbox/panel/mixed.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 14 | 2015-01-06T22:08:34.000Z | 2021-01-01T16:33:23.000Z | """
Mixed effects models
Author: Jonathan Taylor
Author: Josef Perktold
License: BSD-3
Notes
------
It's pretty slow if the model is misspecified; in my first example, convergence
in loglike was not reached within 2000 iterations. Stop criteria based
on convergence of the parameters were added instead.
With correctly specified model, convergence is fast, in 6 iterations in
example.
"""
from __future__ import print_function
import numpy as np
import numpy.linalg as L
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.tools.decorators import cache_readonly
class Unit(object):
"""
Individual experimental unit for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
endog : ndarray, (nobs,)
response, endogenous variable
exog_fe : ndarray, (nobs, k_vars_fe)
explanatory variables as regressors or fixed effects,
should include exog_re to correct mean of random
coefficients, see Notes
exog_re : ndarray, (nobs, k_vars_re)
explanatory variables or random effects or coefficients
Notes
-----
If the exog_re variables are not included in exog_fe, then the
mean of the random constants or coefficients are not centered.
The covariance matrix of the random parameter estimates are not
centered in this case. (That's how it looks to me. JP)
"""
def __init__(self, endog, exog_fe, exog_re):
self.Y = endog
self.X = exog_fe
self.Z = exog_re
self.n = endog.shape[0]
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T)))
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S)
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha)
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ?
def compute_xtwy(self):
"""
Utility function to compute X^tWY (transposed ?) for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ?
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X)
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment. (proof by example)
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t)
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood, tries to return REML
contribution by default though this requires estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=true (should be a=None in signature)
If ML is false, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2.
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML)
class OneWayMixed(object):
"""
Model for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
units : list of units
the data for the individual units should be attached to the units
response, fixed and random : formula expression, called as argument to Formula
*available results and alias*
(subject to renaming, and coversion to cached attributes)
params() -> self.a : coefficient for fixed effects or exog
cov_params() -> self.Sinv : covariance estimate of fixed effects/exog
bse() : standard deviation of params
cov_random -> self.D : estimate of random effects covariance
params_random_units -> [self.units[...].b] : random coefficient for each unit
*attributes*
(others)
self.m : number of units
self.p : k_vars_fixed
self.q : k_vars_random
self.N : nobs (total)
Notes
-----
Fit returns a result instance, but not all results that use the inherited
methods have been checked.
Parameters need to change: drop formula and we require a naming convention for
the units (currently Y,X,Z). - endog, exog_fe, endog_re ?
logL does not include constant, e.g. sqrt(pi)
llf is for MLE not for REML
convergence criteria for iteration
Currently convergence in the iterative solver is reached if either the loglikelihood
*or* the fixed effects parameter don't change above tolerance.
In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations
while the log likelihood did not converge within 2000 iterations. This might be
the case if the fixed effects parameters are well estimated, but there are still
changes in the random effects. If params_rtol and params_atol are set at a higher
level, then the random effects might not be estimated to a very high precision.
The above was with a misspecified model, without a constant. With a
correctly specified model convergence is fast, within a few iterations
(6 in example).
"""
def __init__(self, units):
self.units = units
self.m = len(self.units)
self.n_units = self.m
self.N = sum(unit.X.shape[0] for unit in self.units)
self.nobs = self.N #alias for now
# Determine size of fixed effects
d = self.units[0].X
self.p = d.shape[1] # d.shape = p
self.k_exog_fe = self.p #alias for now
self.a = np.zeros(self.p, np.float64)
# Determine size of D, and sensible initial estimates
# of sigma and D
d = self.units[0].Z
self.q = d.shape[1] # Z.shape = q
self.k_exog_re = self.q #alias for now
self.D = np.zeros((self.q,)*2, np.float64)
self.sigma = 1.
self.dev = np.inf #initialize for iterations, move it?
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y)
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N)
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv
#----------- alias (JP) move to results class ?
def cov_random(self):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma, else return the REML estimate.
see _compute_D, alias for self.D
"""
return self.D
@property
def params(self):
'''
estimated coefficients for exogeneous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a
@property
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units])
def cov_params(self):
'''
estimated covariance for coefficients for exogeneous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
return self.cov_fixed()
@property
def bse(self):
'''
standard errors of estimated coefficients for exogeneous variables (fixed)
'''
return np.sqrt(np.diag(self.cov_params()))
#----------- end alias
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML)
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I don't know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL
def initialize(self):
S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
self.a = L.lstsq(S, Y, rcond=-1)[0]
D = 0
t = 0
sigmasq = 0
for unit in self.units:
unit.r = unit.Y - np.dot(unit.X, self.a)
if self.q > 1:
unit.b = L.lstsq(unit.Z, unit.r, rcond=-1)[0]
else:
Z = unit.Z.reshape((unit.Z.shape[0], 1))
unit.b = L.lstsq(Z, unit.r, rcond=-1)[0]
sigmasq += (np.power(unit.Y, 2).sum() -
(self.a * np.dot(unit.X.T, unit.Y)).sum() -
(unit.b * np.dot(unit.Z.T, unit.r)).sum())
D += np.multiply.outer(unit.b, unit.b)
t += L.pinv(np.dot(unit.Z.T, unit.Z))
#TODO: JP added df_resid check
self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
self.sigma = np.sqrt(sigmasq)
self.D = (D - sigmasq * t) / self.m
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
        if np.fabs((self.dev - old) / self.dev) < rtol:  # relative change in deviance below tol
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
        # tolerance uses |a| so the check also works for negative parameters
        if np.all(np.abs(self.a - self._a_old) <
                  (params_rtol * np.abs(self.a) + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True
def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6):
#initialize for convergence criteria
self._a_old = np.inf * self.a
self.history = {'llf':[], 'params':[], 'D':[]}
for i in range(maxiter):
self._compute_a() #a, Sinv : params, cov_params of fixed exog
self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ?
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML
if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol,
params_atol=params_atol):
break
else: #if end of loop is reached without break
self.termination = 'maxiter'
print('Warning: maximum number of iterations reached')
self.iterations = i
results = OneWayMixedResults(self)
#compatibility functions for fixed effects/exog
results.scale = 1
results.normalized_cov_params = self.cov_params()
return results
class OneWayMixedResults(LikelihoodModelResults):
'''Results class for OneWayMixed models
'''
def __init__(self, model):
#TODO: check, change initialization to more standard pattern
self.model = model
self.params = model.params
#need to overwrite this because we don't have a standard
#model.loglike yet
#TODO: what todo about REML loglike, logL is not normalized
@cache_readonly
def llf(self):
return self.model.logL(ML=True)
@property
def params_random_units(self):
return self.model.params_random_units
def cov_random(self):
return self.model.cov_random()
def mean_random(self, idx='lastexog'):
if idx == 'lastexog':
meanr = self.params[-self.model.k_exog_re:]
elif isinstance(idx, list):
if not len(idx) == self.model.k_exog_re:
raise ValueError('length of idx different from k_exog_re')
else:
meanr = self.params[idx]
else:
meanr = np.zeros(self.model.k_exog_re)
return meanr
def std_random(self):
return np.sqrt(np.diag(self.cov_random()))
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
option for bins in matplotlibs hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
fig : matplotlib figure instance
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
#bins = np.sqrt(self.model.n_units)
            bins = int(5 + 2 * self.model.n_units**(1./3.))  # hist needs an integer bin count
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
            ax = fig.add_subplot(rows, cols, ii + 1)  # subplot indices are 1-based
            freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
                                     bins=bins, density=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
return fig
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax_or_fig = fig
        else:
            ax_or_fig = ax
re1 = self.params_random_units[:,idx1]
re2 = self.params_random_units[:,idx2]
ax.plot(re1, re2, 'o', alpha=0.75)
if title is None:
title = 'Random Effects %d and %d' % (idx1, idx2)
ax.set_title(title)
return ax_or_fig
def plot_scatter_all_pairs(self, title=None):
from statsmodels.graphics.plot_grids import scatter_ellipse
if self.model.k_exog_re < 2:
raise ValueError('less than two variables available')
return scatter_ellipse(self.params_random_units,
#ell_kwds not implemented yet
ell_kwds={'color':'r'})
# #note I have written this already as helper function, get it
# import matplotlib.pyplot as plt
# #from scipy.stats import norm as normal
# fig = plt.figure()
# k = self.model.k_exog_re
# n_plots = k * (k - 1) // 2
# if n_plots > 3:
# rows, cols = int(np.ceil(n_plots * 0.5)), 2
# else:
# rows, cols = n_plots, 1
#
# count = 1
# for ii in range(k):
# for jj in range(ii):
# ax = fig.add_subplot(rows, cols, count)
# self.plot_scatter_pairs(ii, jj, title=None, ax=ax)
# count += 1
#
# return fig
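# A minimal usage sketch (hypothetical simulated data, not part of the module):
# build one Unit per group, then estimate fixed and random effects jointly.
if __name__ == '__main__':
    np.random.seed(12345)
    beta = np.array([1.0, -2.0])                 # "true" fixed effects
    units = []
    for _ in range(50):
        exog_fe = np.random.randn(10, 2)         # fixed-effects design (nobs_i, p)
        exog_re = np.ones((10, 1))               # random intercept (nobs_i, q)
        b = 0.5 * np.random.randn(1)             # unit-specific random effect
        endog = (np.dot(exog_fe, beta) + np.dot(exog_re, b)
                 + 0.1 * np.random.randn(10))
        units.append(Unit(endog, exog_fe, exog_re))
    model = OneWayMixed(units)
    model.initialize()
    res = model.fit(maxiter=200)
    print('fixed effects:', model.params)
    print('random effects cov:', model.cov_random())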
| 31.352765 | 95 | 0.579261 |
aced5c3e8860f0799b07e5fb918c0024933035ed | 34,799 | py | Python | appengine/src/greenday_api/video/video_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 6 | 2018-07-31T16:48:07.000Z | 2020-02-01T03:17:51.000Z | appengine/src/greenday_api/video/video_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 41 | 2018-08-07T16:43:07.000Z | 2020-06-05T18:54:50.000Z | appengine/src/greenday_api/video/video_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 1 | 2018-08-07T16:40:18.000Z | 2018-08-07T16:40:18.000Z | """
The Video API
"""
import endpoints
import itertools
from protorpc import message_types
from django.utils import timezone
from django.db.models import Q
from greenday_core.api_exceptions import (
BadRequestException, ForbiddenException, NotFoundException)
from greenday_core.documents.video import VideoDocument
from greenday_core.eventbus import publish_appevent
from greenday_core.memoize_cache import cache_manager
from greenday_core.models import (
Video,
VideoCollection,
UserVideoDetail,
DuplicateVideoMarker
)
from greenday_core.constants import EventKind
from greenday_core.youtube_client import YouTubeClient
from ..api import (
BaseAPI,
greenday_api,
greenday_method,
auth_required,
add_order_to_repeated
)
from ..searchbot import VideoSearch, process_comma_seperate_filters
from ..utils import (
get_obj_or_api_404,
api_appevent,
)
from ..project.mixins import ProjectAPIMixin
from ..mapper import GeneralMapper
from .mappers import VideoTagSearchMapper, VideoMapper
from .caching import remove_video_list_cache
from .messages import (
VideoResponseMessage,
VideoListResponse,
UserVideoDetailResponseMessage,
BatchVideoResponseMessage,
BatchListResponseMessage,
VideoTagListResponse,
VideoBatchListResponseMessage,
VideoResponseMessageSlim
)
from .containers import(
VideoEntityContainer, VideoInsertEntityContainer,
VideoUpdateEntityContainer,
VideoDuplicateEntityContainer,
VideoBooleanFlagEntityContainer,
VideoBatchContainer,
VideoIDBatchContainer,
CreateVideoBatchContainer,
VideoFavouriteBatchContainer,
VideoFilterContainer,
ArchiveVideoBatchContainer
)
from .utils import make_batch_response_message, has_video_search_args
@greenday_api.api_class(
resource_name='video', auth_level=endpoints.AUTH_LEVEL.REQUIRED)
class VideoAPI(BaseAPI, ProjectAPIMixin):
"""
API for Videos
Object disposed after request is completed.
"""
def __init__(self, *args, **kwargs):
"""
Creates Video API
"""
super(VideoAPI, self).__init__(*args, **kwargs)
# standard video mapper
self.mapper = VideoMapper()
self.slim_mapper = VideoMapper(message_cls=VideoResponseMessageSlim)
# favourite mapper to handle responses from favouriting.
self.fave_mapper = GeneralMapper(
UserVideoDetail, UserVideoDetailResponseMessage)
# mapper for video tag filter
self.video_tag_mapper = VideoTagSearchMapper()
@greenday_method(
VideoFilterContainer,
VideoListResponse,
path='project/{project_id}/video',
http_method='GET',
name='list',
pre_middlewares=[auth_required],
post_middlewares=[add_order_to_repeated])
def video_list(self, request):
"""
API Endpoint to list all videos within the passed project
"""
project = self.get_project(
request.project_id,
assigned_only=True
)
def _video_list(request, project):
if request.archived:
qs = Video.archived_objects.all()
else:
qs = Video.objects.all()
ordered_video_ids = None
if request.collection_id:
collection = get_obj_or_api_404(
VideoCollection,
pk=request.collection_id,
project_id=project.pk)
ordered_video_ids = list(
collection.videocollectionvideos
.values_list('video_id', flat=True)
)
if has_video_search_args(request):
# build search bot
video_search = VideoSearch.create(
"videos", VideoDocument, ids_only=True)
video_search = video_search.multi_filter(request)
# grab 1000 results and run the pks into an ORM lookup so that
# we can return database objects to the front end.
ids = video_search[:1000] # DN: this must be a slice
qs = qs.filter(pk__in=ids)
else:
qs = qs.filter(project=project)
videos = (
qs
.select_related("youtube_video")
.prefetch_related("videocollectionvideos")
.order_by('-modified')
)
if ordered_video_ids:
video_dict = {v.pk: v for v in videos}
videos = [
video_dict[vid] for vid in ordered_video_ids
if vid in video_dict
]
# build the response
return VideoListResponse(
items=map(self.slim_mapper.map, videos),
is_list=True
)
if has_video_search_args(request):
# don't cache search results
return _video_list(request, project)
else:
return cache_manager.get_or_set(
_video_list,
request,
project,
message_type=VideoListResponse,
timeout=10)
@greenday_method(VideoEntityContainer, VideoResponseMessage,
path='project/{project_id}/video/{youtube_id}',
http_method='GET',
name='get',
pre_middlewares=[auth_required])
def video_get(self, request):
"""
API Endpoint to retrieve a project video.
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.non_trashed_objects
.select_related("youtube_video")
.prefetch_related('videocollectionvideos'),
youtube_id=request.youtube_id,
project_id=project.pk
)
try:
user_video_detail = video.related_users.get(
user=self.current_user)
except UserVideoDetail.DoesNotExist:
watched = False
else:
watched = user_video_detail.watched
return self.mapper.map(video, watched=watched)
@greenday_method(VideoInsertEntityContainer, VideoResponseMessage,
path='project/{project_id}/video', http_method='POST',
name='insert',
pre_middlewares=[auth_required])
@api_appevent(
EventKind.VIDEOCREATED,
id_getter_post=lambda s, req, res: res.id,
video_id_getter_post=lambda s, req, res: res.id,
project_id_getter=lambda s, req: req.project_id)
def video_insert(self, request):
"""
API Endpoint to add a video to a project
"""
project = self.get_project(request.project_id, assigned_only=True)
youtube_client = YouTubeClient()
yt_video = youtube_client.update_or_create_videos(
(request.youtube_id,))[0]
try:
video = (
Video.all_objects
.get(
youtube_video__youtube_id=request.youtube_id,
project_id=project.pk
))
except Video.DoesNotExist:
video = Video(
project_id=project.pk,
youtube_video=yt_video,
youtube_id=yt_video.youtube_id,
user_id=self.current_user.pk,
)
else:
if not (video.trashed_at or video.archived_at):
raise BadRequestException(
"The video '%s' already exists in this project" %
request.youtube_id)
else:
# if it's trashed or archived then restore it
video.archived_at = video.trashed_at = None
video.save()
# attach youtube video for mapping response
video.youtube_video = yt_video
remove_video_list_cache(project)
return self.mapper.map(video)
@greenday_method(VideoUpdateEntityContainer, VideoResponseMessage,
path='project/{project_id}/video/{youtube_id}',
http_method='PUT',
name='update',
pre_middlewares=[auth_required])
def video_update(self, request):
"""
API Endpoint to update a project video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.objects.select_related("youtube_video"),
youtube_id=request.youtube_id,
project_id=project.pk
)
video.location_overridden = request.location_overridden
video.recorded_date_overridden = request.recorded_date_overridden
video.precise_location = request.precise_location
if request.location_overridden:
video.latitude = request.latitude
video.longitude = request.longitude
else:
video.latitude = video.longitude = None
if request.recorded_date_overridden:
video.recorded_date = request.recorded_date
else:
video.recorded_date = None
video.save()
client = YouTubeClient()
client.update_videos((video.youtube_video,))
video.youtube_video.save()
publish_appevent(
EventKind.VIDEOUPDATED,
object_id=video.pk,
video_id=video.pk,
project_id=project.pk,
user=self.current_user)
return self.mapper.map(video)
@greenday_method(VideoUpdateEntityContainer, VideoResponseMessage,
path='project/{project_id}/video/{youtube_id}',
http_method='PATCH',
name='patch',
pre_middlewares=[auth_required])
def video_patch(self, request):
"""
API Endpoint to patch a project video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.objects.select_related("youtube_video"),
youtube_id=request.youtube_id,
project_id=project.pk
)
if request.location_overridden is not None:
video.location_overridden = request.location_overridden
if request.recorded_date_overridden is not None:
video.recorded_date_overridden = request.recorded_date_overridden
if request.precise_location is not None:
video.precise_location = request.precise_location
if video.location_overridden:
if request.latitude is not None:
video.latitude = request.latitude
if request.longitude is not None:
video.longitude = request.longitude
else:
video.latitude = video.longitude = None
if video.recorded_date_overridden:
video.recorded_date = request.recorded_date
else:
video.recorded_date = None
video.save()
publish_appevent(
EventKind.VIDEOUPDATED,
object_id=video.pk,
video_id=video.pk,
project_id=project.pk,
user=self.current_user)
return self.mapper.map(video)
@greenday_method(VideoEntityContainer, message_types.VoidMessage,
path='project/{project_id}/video/{youtube_id}',
http_method='DELETE',
name='delete',
pre_middlewares=[auth_required])
def video_delete(self, request):
"""
API Endpoint to delete a project video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.non_trashed_objects.all(),
project=project,
youtube_id=request.youtube_id)
if (not self.current_user.is_superuser and
not project.is_owner_or_admin(self.current_user) and
self.current_user.pk != video.user_id):
raise ForbiddenException
video_id = video.pk
video.delete(trash=False)
remove_video_list_cache(project)
publish_appevent(
EventKind.VIDEODELETED,
object_id=video_id,
video_id=video_id,
project_id=project.pk,
user=self.current_user
)
return message_types.VoidMessage()
@greenday_method(
VideoBatchContainer, BatchListResponseMessage,
path='project/{project_id}/video/batch-delete',
http_method='PUT', name='video_batch_delete',
pre_middlewares=[auth_required])
def video_batch_delete(self, request):
"""
API Endpoint to soft delete a batch of videos.
This will never hard delete a video
"""
project = self.get_project(request.project_id, assigned_only=True)
is_owner_or_admin = project.is_owner_or_admin(self.current_user)
videos = {
v.youtube_id: v for v in
Video.all_objects
.filter(
trashed_at__isnull=True,
project_id=project.pk,
youtube_id__in=request.youtube_ids
)
}
does_not_exist = [
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(videos.keys())
]
video_ids_to_delete, permission_denied, success = [], [], []
for yt_id, video in videos.items():
if self.current_user.id != video.user_id and not (
self.current_user.is_superuser or is_owner_or_admin):
permission_denied.append(
make_batch_response_message(yt_id, forbidden=True))
else:
video_ids_to_delete.append(video.pk)
success.append(
make_batch_response_message(yt_id, success=True))
(Video.all_objects
.filter(project=project, pk__in=video_ids_to_delete)
.delete())
remove_video_list_cache(project)
return BatchListResponseMessage(
items=success + does_not_exist + permission_denied,
is_list=True)
@greenday_method(
ArchiveVideoBatchContainer, VideoBatchListResponseMessage,
path='project/{project_id}/video/batch-archive',
http_method='PUT', name='video_batch_archive',
pre_middlewares=[auth_required])
def video_batch_archive(self, request):
"""
Archive or unarchive a batch of videos
Pass unarchive=true to unarchive
"""
project = self.get_project(request.project_id, assigned_only=True)
is_owner_or_admin = project.is_owner_or_admin(self.current_user)
qs = Video.archived_objects if request.unarchive else Video.objects
videos = {
v.youtube_id: v for v in
qs
.filter(
project_id=project.pk,
youtube_id__in=request.youtube_ids
)
}
does_not_exist = [
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(videos.keys())
]
video_ids_to_update, permission_denied, success = [], [], []
for yt_id, video in videos.items():
if self.current_user.id != video.user_id and not (
self.current_user.is_superuser or is_owner_or_admin):
permission_denied.append(
make_batch_response_message(yt_id, forbidden=True))
else:
video_ids_to_update.append(video.pk)
success.append(
make_batch_response_message(yt_id, success=True))
(
qs
.filter(project=project, pk__in=video_ids_to_update)
.update(archived_at=None if request.unarchive else timezone.now()))
remove_video_list_cache(project)
for video_id in video_ids_to_update:
publish_appevent(
kind=EventKind.VIDEOUNARCHIVED if
request.unarchive else EventKind.VIDEOARCHIVED,
object_id=video_id,
video_id=video_id,
project_id=project.pk,
user=self.current_user)
videos = (
Video.all_objects
.select_related("youtube_video")
.filter(pk__in=video_ids_to_update)
if video_ids_to_update else []
)
return VideoBatchListResponseMessage(
items=success + does_not_exist + permission_denied,
videos=map(self.mapper.map, videos),
is_list=True)
@greenday_method(
VideoBatchContainer, VideoBatchListResponseMessage,
path='project/{project_id}/video/batch-mark-as-duplicate',
http_method='PUT', name='video_batch_mark_as_duplicate',
pre_middlewares=[auth_required])
def video_batch_mark_as_duplicate(self, request):
"""
Takes a list of video IDs are marks them all as duplicates of
one another
"""
project = self.get_project(request.project_id, assigned_only=True)
videos = {
v.pk: v for v in
Video.objects
.select_related("youtube_video")
.filter(
project_id=project.pk,
youtube_id__in=request.youtube_ids
)
}
video_ids = videos.keys()
youtube_ids = [v.youtube_id for v in videos.values()]
combinations = [sorted(x) for x in itertools.combinations(video_ids, 2)]
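        # each pair is sorted so it matches the (video_1, video_2) ordering
        # stored on DuplicateVideoMarker; the OR query built below then finds
        # any markers that already exist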
existing_marker_query = Q()
for id1, id2 in combinations:
existing_marker_query = (
existing_marker_query |
Q(video_1=id1, video_2=id2)
)
existing_markers = list(
DuplicateVideoMarker.objects
.filter(existing_marker_query)
.values_list("video_1", "video_2")
)
new_markers = []
for id1, id2 in combinations:
if (id1, id2,) not in existing_markers:
new_markers.append(DuplicateVideoMarker(
video_1=videos[id1],
video_2=videos[id2]))
DuplicateVideoMarker.objects.bulk_create(new_markers)
return VideoBatchListResponseMessage(
items=[
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(youtube_ids)
] + [
make_batch_response_message(yt_id, success=True)
for yt_id in youtube_ids
],
videos=map(self.mapper.map, videos.values()),
is_list=True)
@greenday_method(
VideoIDBatchContainer, VideoBatchListResponseMessage,
path='project/{project_id}/video/{youtube_id}/batch-mark-video-as-duplicate',
http_method='PUT', name='video_batch_mark_video_as_duplicate',
pre_middlewares=[auth_required])
def video_batch_mark_video_as_duplicate(self, request):
"""
Takes a list of video IDs and makes them all as a duplicate of the
given video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video, project=project, youtube_id=request.youtube_id)
videos = {
v.pk: v for v in
Video.objects
.select_related("youtube_video")
.filter(
project_id=project.pk,
youtube_id__in=request.youtube_ids
)
}
video_ids = videos.keys()
youtube_ids = [v.youtube_id for v in videos.values()]
combinations = [sorted((video.pk, vid,)) for vid in video_ids]
existing_marker_query = Q()
for id1, id2 in combinations:
existing_marker_query = (
existing_marker_query |
Q(video_1=id1, video_2=id2)
)
existing_markers = list(
DuplicateVideoMarker.objects
.filter(existing_marker_query)
.values_list("video_1", "video_2")
)
new_markers = []
for vid in video_ids:
if tuple(sorted((video.pk, vid,))) not in existing_markers:
new_markers.append(DuplicateVideoMarker(
video_1=video,
video_2=videos[vid]))
DuplicateVideoMarker.objects.bulk_create(new_markers)
return VideoBatchListResponseMessage(
items=[
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(youtube_ids)
] + [
make_batch_response_message(yt_id, success=True)
for yt_id in youtube_ids
],
videos=map(self.mapper.map, videos.values()),
is_list=True)
@greenday_method(
CreateVideoBatchContainer, VideoBatchListResponseMessage,
path='project/{project_id}/video/batch-create',
http_method='PUT', name='video_batch_create',
pre_middlewares=[auth_required])
def video_batch_create(self, request):
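        """
        API Endpoint to add a batch of videos to a project
        """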
project = self.get_project(request.project_id, assigned_only=True)
duplicate_videos = {
v.youtube_video.youtube_id: v for v in
Video.objects
.filter(
project=project,
youtube_video__youtube_id__in=request.youtube_ids
)
}
success, error, videos = [], [], []
for youtube_id in request.youtube_ids:
if youtube_id in duplicate_videos:
video = duplicate_videos[youtube_id]
error.append(
make_batch_response_message(
video.youtube_id,
error="The video '{0}' already exists in this project"
.format(youtube_id)))
videos.append(duplicate_videos[youtube_id])
continue
youtube_ids_of_videos_to_add = [
yt_id for yt_id in request.youtube_ids
if yt_id not in duplicate_videos]
youtube_client = YouTubeClient()
youtube_videos = {
v.youtube_id: v for v in
youtube_client.update_or_create_videos(
youtube_ids_of_videos_to_add)
}
for yt_id, yt_video in youtube_videos.items():
try:
video = (
Video.all_objects
.get(
youtube_video__youtube_id=yt_id,
project_id=project.pk
))
except Video.DoesNotExist:
video = Video(
project_id=project.pk,
youtube_video=yt_video,
youtube_id=yt_video.youtube_id,
user_id=self.current_user.pk,
)
else:
if not (video.trashed_at or video.archived_at):
error.append(
make_batch_response_message(
None,
error="The video '{0}' already exists in "
"this project".format(yt_id))
)
else:
# if it's trashed or archived then restore it
video.archived_at = video.trashed_at = None
video.save()
# attach youtube video for mapping response
video.youtube_video = yt_video
videos.append(video)
success.append(
make_batch_response_message(video.youtube_id, success=True))
publish_appevent(
EventKind.VIDEOCREATED,
object_id=video.pk,
video_id=video.pk,
project_id=project.pk,
user=self.current_user)
remove_video_list_cache(project)
return VideoBatchListResponseMessage(
items=success + error,
videos=map(self.mapper.map, videos),
is_list=True)
@greenday_method(
VideoDuplicateEntityContainer, VideoResponseMessage,
path='project/{project_id}/video/{youtube_id}/duplicates',
http_method='PUT',
name='mark_duplicate', pre_middlewares=[auth_required])
def video_mark_duplicate(self, request):
"""
API Endpoint to mark a video as a duplicate of another video
        even if the two videos have distinct YouTube IDs.
"""
project = self.get_project(request.project_id, assigned_only=True)
if request.youtube_id == request.duplicate_of_id:
raise ForbiddenException(
'You cannot mark a video as a duplicate of itself.')
videos = {
v.youtube_id: v for v in
Video.objects
.select_related("youtube_video")
.filter(
project=project,
youtube_id__in=(
request.youtube_id, request.duplicate_of_id,)
)
}
if not len(videos) == 2:
raise NotFoundException
video = videos[request.youtube_id]
duplicate_of = videos[request.duplicate_of_id]
DuplicateVideoMarker.add_marker(video, duplicate_of)
return self.mapper.map(duplicate_of)
@greenday_method(
VideoEntityContainer, VideoListResponse,
path='project/{project_id}/video/{youtube_id}/duplicates',
http_method='GET',
name='list_duplicates',
pre_middlewares=[auth_required])
def video_duplicates_list(self, request):
"""
API Endpoint to list all videos in the current project that have
been marked as duplicates of other project videos.
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video, project=project, youtube_id=request.youtube_id)
return VideoListResponse(
items=map(self.slim_mapper.map, video.get_duplicates()),
is_list=True
)
@greenday_method(
VideoIDBatchContainer, VideoBatchListResponseMessage,
path='project/{project_id}/video/{youtube_id}/batch-delete-duplicate-markers',
http_method='PUT',
name='batch_delete_duplicate_markers',
pre_middlewares=[auth_required])
def video_batch_delete_duplicate_marker(self, request):
"""
Deletes the duplicate marker for a list of videos which are already
marked as a duplicate of the given video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video, project=project, youtube_id=request.youtube_id)
videos = {
v.pk: v for v in
Video.objects
.select_related("youtube_video")
.filter(
project_id=project.pk,
youtube_id__in=request.youtube_ids)
}
video_ids = videos.keys()
youtube_ids = [v.youtube_id for v in videos.values()]
combinations = [sorted((video.pk, vid,)) for vid in video_ids]
existing_marker_query = Q()
for id1, id2 in combinations:
existing_marker_query = (
existing_marker_query | Q(video_1=id1, video_2=id2))
existing_markers = list(
DuplicateVideoMarker.objects
.filter(existing_marker_query)
.values_list("video_1", "video_2")
)
missing_marker_ids = [
v.youtube_id for v in videos.values()
if tuple(sorted((video.pk, v.pk,))) not in existing_markers
]
DuplicateVideoMarker.objects.filter(existing_marker_query).delete()
return VideoBatchListResponseMessage(
items=[
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(youtube_ids)
] + [
make_batch_response_message(yt_id, success=True)
for yt_id in youtube_ids if yt_id not in missing_marker_ids
] + [
BatchVideoResponseMessage(
youtube_id=yt_id,
success=False,
msg='Video with youtube_id {0} is not marked as a duplicate'
.format(yt_id)) for yt_id in missing_marker_ids
],
videos=map(self.mapper.map, videos.values()),
is_list=True)
@greenday_method(
VideoEntityContainer, message_types.VoidMessage,
path='project/{project_id}/video/{youtube_id}/unarchive',
http_method='PUT',
name='unarchive',
pre_middlewares=[auth_required])
def video_unarchive(self, request):
"""
Unarchives the given video
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.archived_objects.all(),
youtube_id=request.youtube_id,
project=project)
video.unarchive()
remove_video_list_cache(project)
publish_appevent(
kind=EventKind.VIDEOUNARCHIVED,
object_id=video.pk,
video_id=video.pk,
project_id=project.pk,
user=self.current_user)
return message_types.VoidMessage()
@greenday_method(
VideoBooleanFlagEntityContainer, VideoResponseMessage,
path='project/{project_id}/video/{youtube_id}/set-favourite',
http_method='PUT', name='video_set_favourite',
pre_middlewares=[auth_required])
def video_set_favourite(self, request):
"""
        API Endpoint to allow a user to set whether a video is marked
        as a favourite
"""
project = self.get_project(request.project_id, assigned_only=True)
video = get_obj_or_api_404(
Video.objects.select_related("youtube_video"),
youtube_id=request.youtube_id,
project_id=project.pk)
video.favourited = request.value
video.save()
return self.mapper.map(video)
@greenday_method(
VideoFavouriteBatchContainer,
BatchListResponseMessage,
path='project/{project_id}/video/batch-favourite',
http_method='PUT', name='video_batch_mark_favourite',
pre_middlewares=[auth_required])
def video_batch_mark_favourite(self, request):
"""
API Endpoint to mark a batch of videos as favourites.
"""
project = self.get_project(request.project_id, assigned_only=True)
video_ids = {
youtube_id: pk for pk, youtube_id in
Video.objects
.filter(project_id=project.pk, youtube_id__in=request.youtube_ids)
.values_list('pk', 'youtube_id')
}
(Video.objects
.filter(project=project, pk__in=video_ids.values())
.update(favourited=request.value))
remove_video_list_cache(project)
return BatchListResponseMessage(
items=[
make_batch_response_message(yt_id, not_found=True)
for yt_id in set(request.youtube_ids) - set(video_ids.keys())
] + [
make_batch_response_message(yt_id, success=True)
for yt_id in video_ids.keys()
],
is_list=True
)
@greenday_method(
VideoBooleanFlagEntityContainer, UserVideoDetailResponseMessage,
path='project/{project_id}/video/{youtube_id}/mark-watched',
http_method='PUT', name='video_user_mark_watched',
pre_middlewares=[auth_required])
def video_user_mark_watched(self, request):
"""
API Endpoint to allow a user to mark whether they have
watched a video or not.
"""
project = self.get_project(request.project_id, assigned_only=True)
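        # the video may be active or archived; check both managers before 404ing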
try:
video = Video.objects.get(
youtube_id=request.youtube_id, project=project.pk)
except Video.DoesNotExist:
try:
video = Video.archived_objects.get(
youtube_id=request.youtube_id, project=project.pk)
except Video.DoesNotExist:
raise NotFoundException('Video instance not found!')
try:
user_detail = UserVideoDetail.objects.get(
video_id=video.pk, user=self.current_user)
except UserVideoDetail.DoesNotExist:
user_detail = UserVideoDetail.objects.create(
video=video, user=self.current_user, watched=request.value)
else:
user_detail.watched = request.value
user_detail.save()
return self.fave_mapper.map(user_detail)
@greenday_method(
VideoFilterContainer, VideoTagListResponse,
path='project/{project_id}/video/filter_by_tags',
http_method='GET', name='video_tag_filter',
pre_middlewares=[auth_required])
def video_tag_filter(self, request):
"""
API endpoint to return tag instances applied to videos
Accepts filters to restrict the set of videos to return tag
instances for.
"""
project = self.get_project(request.project_id, assigned_only=True)
def _video_tag_filter(request):
if request.archived:
qs = Video.archived_objects.all()
else:
qs = Video.objects.all()
qs = qs.select_related("youtube_video")
if has_video_search_args(request):
video_search = VideoSearch.create(
"videos", VideoDocument, ids_only=True)
video_search = video_search.multi_filter(request)
# for now, this is a plain search on the video index.
# We then get all of the tags from the database and
# map them in memory
ids = video_search[:1000]
video_map = qs.in_bulk(ids)
else:
video_map = {v.id: v for v in qs.filter(project=project)}
return self.video_tag_mapper.map(video_map)
if has_video_search_args(request):
# don't cache search results
return _video_tag_filter(request)
return cache_manager.get_or_set(
_video_tag_filter,
request,
message_type=VideoTagListResponse)
| 35.079637 | 86 | 0.595678 |
aced5d2710c443ad6d1a2f7fcc1f27533ecd319c | 5,811 | py | Python | design_bench/oracles/exact/tf_bind_10_oracle.py | brandontrabucco/design_bench | 824516ec59396aded3ca55ec7c1c313626ecaceb | [
"MIT"
] | 27 | 2020-06-30T00:57:12.000Z | 2022-03-25T16:24:11.000Z | design_bench/oracles/exact/tf_bind_10_oracle.py | brandontrabucco/design_bench | 824516ec59396aded3ca55ec7c1c313626ecaceb | [
"MIT"
] | 7 | 2021-02-16T06:25:02.000Z | 2022-03-31T17:21:17.000Z | design_bench/oracles/exact/tf_bind_10_oracle.py | brandontrabucco/design_bench | 824516ec59396aded3ca55ec7c1c313626ecaceb | [
"MIT"
] | 5 | 2021-07-19T12:16:32.000Z | 2022-03-01T16:56:16.000Z | from design_bench.oracles.exact_oracle import ExactOracle
from design_bench.datasets.discrete_dataset import DiscreteDataset
from design_bench.datasets.discrete.tf_bind_10_dataset import TFBind10Dataset
import numpy as np
class TFBind10Oracle(ExactOracle):
"""An abstract class for managing the ground truth score functions f(x)
for model-based optimization problems, where the
goal is to find a design 'x' that maximizes a prediction 'y':
max_x { y = f(x) }
Public Attributes:
external_dataset: DatasetBuilder
an instance of a subclass of the DatasetBuilder class which points to
the mutable task dataset for a model-based optimization problem
internal_dataset: DatasetBuilder
an instance of a subclass of the DatasetBuilder class which has frozen
statistics and is used for training the oracle
is_batched: bool
a boolean variable that indicates whether the evaluation function
        implemented for a particular oracle is batched, which affects
the scaling coefficient of its computational cost
internal_batch_size: int
an integer representing the number of design values to process
internally at the same time, if None defaults to the entire
tensor given to the self.score method
internal_measurements: int
an integer representing the number of independent measurements of
the prediction made by the oracle, which are subsequently
averaged, and is useful when the oracle is stochastic
noise_std: float
the standard deviation of gaussian noise added to the prediction
values 'y' coming out of the ground truth score function f(x)
in order to make the optimization problem difficult
expect_normalized_y: bool
        a boolean indicator that specifies whether the predictions made by
        the oracle score function are expected to be normalized
    expect_normalized_x: bool
        a boolean indicator that specifies whether the design inputs given
        to the oracle score function are expected to be normalized
expect_logits: bool
a boolean that specifies whether the oracle score function is
expecting logits when the dataset is discrete
Public Methods:
predict(np.ndarray) -> np.ndarray:
a function that accepts a batch of design values 'x' as input and for
each design computes a prediction value 'y' which corresponds
to the score in a model-based optimization problem
check_input_format(DatasetBuilder) -> bool:
        a function that accepts a dataset builder as input and returns true
        when design values 'x' drawn from that dataset are
        compatible with this class of oracle
"""
name = "exact_ddg"
@classmethod
def supported_datasets(cls):
"""An attribute the defines the set of dataset classes which this
oracle can be applied to forming a valid ground truth score
function for a model-based optimization problem
"""
return {TFBind10Dataset}
@classmethod
def fully_characterized(cls):
"""An attribute the defines whether all possible inputs to the
model-based optimization problem have been evaluated and
are are returned via lookup in self.predict
"""
return True
@classmethod
def is_simulated(cls):
"""An attribute the defines whether the values returned by the oracle
were obtained by running a computer simulation rather than
performing physical experiments with real data
"""
return False
def protected_predict(self, x):
"""Score function to be implemented by oracle subclasses, where x is
either a batch of designs if self.is_batched is True or is a
single design when self._is_batched is False
Arguments:
x_batch: np.ndarray
a batch or single design 'x' that will be given as input to the
oracle model in order to obtain a prediction value 'y' for
each 'x' which is then returned
Returns:
y_batch: np.ndarray
a batch or single prediction 'y' made by the oracle model,
corresponding to the ground truth score for each design
value 'x' in a model-based optimization problem
"""
x_key = tuple(x.tolist())
return self.sequence_to_score[x_key].astype(np.float32) \
if x_key in self.sequence_to_score else np.full(
[1], self.internal_dataset.dataset_min_output, dtype=np.float32)
def __init__(self, dataset: DiscreteDataset, **kwargs):
"""Initialize the ground truth score function f(x) for a model-based
optimization problem, which involves loading the parameters of an
oracle model and estimating its computational cost
Arguments:
dataset: DiscreteDataset
an instance of a subclass of the DatasetBuilder class which has
a set of design values 'x' and prediction values 'y', and defines
batching and sampling methods for those attributes
"""
# initialize the oracle using the super class
super(TFBind10Oracle, self).__init__(
dataset, is_batched=False,
internal_batch_size=1, internal_measurements=1,
expect_normalized_y=False,
expect_normalized_x=False, expect_logits=False, **kwargs)
# dictionary containing every point in the search space
self.sequence_to_score = dict()
self.internal_dataset._disable_transform = True
for x, y in self.internal_dataset.iterate_samples():
self.sequence_to_score[tuple(x.tolist())] = y
self.internal_dataset._disable_transform = False
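# A minimal usage sketch (hypothetical; assumes the TF-Bind-10 dataset files
# have already been downloaded for design_bench on this machine):
# dataset = TFBind10Dataset()
# oracle = TFBind10Oracle(dataset)
# scores = oracle.predict(dataset.x[:32])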
| 38.230263 | 78 | 0.695061 |
aced5d386214e3200289b4fd52ba9c42a9742826 | 301 | py | Python | vbb/language/serializers.py | VillageBookBuilders/vbb-portal-packend | 9563b492aa93f12fdfed41a905ff185182e97dd8 | [
"MIT"
] | 1 | 2022-03-30T18:12:49.000Z | 2022-03-30T18:12:49.000Z | vbb/language/serializers.py | VillageBookBuilders/vbb-portal-backend | decdec392f7bd585b73e5554b20c17baea5d133d | [
"MIT"
] | 22 | 2022-02-28T02:37:03.000Z | 2022-03-28T02:32:35.000Z | vbb/language/serializers.py | VillageBookBuilders/vbb-portal-packend | 9563b492aa93f12fdfed41a905ff185182e97dd8 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from vbb.language.models import Language
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = [
"id",
"english_display_name",
"name_in_native_alphabet",
]
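# A minimal usage sketch (hypothetical):
# LanguageSerializer(Language.objects.first()).data
# -> {"id": ..., "english_display_name": ..., "name_in_native_alphabet": ...}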
| 21.5 | 54 | 0.637874 |
aced5e1e19091c3914020d046b9d2823f6799465 | 963 | py | Python | fserver.py | aerendon/map-reduce | ddae9bc1858c0da3639d13091de5164889ca7d5c | [
"MIT"
] | null | null | null | fserver.py | aerendon/map-reduce | ddae9bc1858c0da3639d13091de5164889ca7d5c | [
"MIT"
] | null | null | null | fserver.py | aerendon/map-reduce | ddae9bc1858c0da3639d13091de5164889ca7d5c | [
"MIT"
] | null | null | null | #!env/bin/python3
from termcolor import colored
import os
import json
import sys
import zmq
import ffile
context = zmq.Context()
def clear():
os.system('clear') # on linux / os x
# os.system('cls') # on windows
def server_info(server):
print (colored('############# SERVER', 'magenta'))
print (colored('My IP -->' + server['ip'] + ':' + server['port'], 'magenta'))
print (colored('#############', 'magenta'))
def send_chunk(server, mappers, data):
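    # Fan the chunk out to every mapper in the newline-separated `mappers`
    # string: one REQ socket per mapper, blocking on each acknowledgement
    # before moving to the next.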
mappers = mappers.split('\n')
for mapper in mappers:
socket_send = context.socket(zmq.REQ)
send_server = ffile.create_req('send_server', server['ip'] + ':' + server['port'], mapper, {'origin': server['ip'] + ':' + server['port'], 'data': data})
socket_send.connect('tcp://' + mapper)
socket_send.send_string(json.dumps(send_server))
message = socket_send.recv()
print(colored(message.decode("utf-8"), 'green'))
| 26.027027 | 161 | 0.597092 |
aced5e2450e969ffeacb9ed96e040c3a2d93fb88 | 1,351 | py | Python | stringtest.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | stringtest.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | stringtest.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | # Python Program To Use The String Testing Methods
'''
Function Name : String Testing Methods .
Function Date : 3 Sep 2020
Function Author : Prasad Dangare
Input : Integer,String
Output : Integer,String
'''
# Check If All The Characters In The Text Are Alphanumeric Using isalnum() Method
txt = "Company12"
x = txt.isalnum()
print(x)
print("Next \n")
# Check If All The Characters In The Text Are Letters Using isalpha() Method
txt = "CompanyX"
x = txt.isalpha()
print(x)
print("Next \n")
# Check If All The Characters In The Text Are Digits Using isdigit() Method
txt = "50800"
x = txt.isdigit()
print(x)
print("Next \n")
# Check If All The Characters In The Text Are In Lower Case Using islower() Method
txt = "hello world!"
x = txt.islower()
print(x)
print("Next \n")
# Check If All The Characters In The Text Are In Upper Case Using isupper() Method
txt = "THIS IS NOW!"
x = txt.isupper()
print(x)
print("Next \n")
# Check If Each Word Starts With An Upper Case Letter Using istitle() Method
txt = "Hello, And Welcome To My World!"
x = txt.istitle()
print(x)
print("Next \n")
# Check If All The Characters In The Text Are Whitespaces Using isspace() Method
txt = " "
x = txt.isspace()
print(x)
print("Last \n") | 18.763889 | 83 | 0.641747 |
aced5e2cfb28caf8fd922b337cf3bd1a6eb5bcdf | 3,347 | py | Python | qemu_mode/qemu-2.10.0/roms/seabios/scripts/checkrom.py | braymar/afl | a6b2dad6bbd9c9401814e088582bc04a074651eb | [
"Apache-2.0"
] | 8 | 2020-03-16T06:34:49.000Z | 2021-12-06T01:50:54.000Z | qemu_mode/qemu-2.10.0/roms/seabios/scripts/checkrom.py | braymar/afl | a6b2dad6bbd9c9401814e088582bc04a074651eb | [
"Apache-2.0"
] | null | null | null | qemu_mode/qemu-2.10.0/roms/seabios/scripts/checkrom.py | braymar/afl | a6b2dad6bbd9c9401814e088582bc04a074651eb | [
"Apache-2.0"
] | 7 | 2020-09-08T15:14:34.000Z | 2021-06-24T18:03:49.000Z | #!/usr/bin/env python
# Script to check a bios image and report info on it.
#
# Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, struct
import layoutrom, buildrom
from python23compat import as_bytes
def subst(data, offset, new):
return data[:offset] + new + data[offset + len(new):]
def checksum(data, start, size, csum):
sumbyte = buildrom.checksum(data[start:start+size])
return subst(data, start+csum, sumbyte)
def main():
# Get args
objinfo, finalsize, rawfile, outfile = sys.argv[1:]
# Read in symbols
objinfofile = open(objinfo, 'r')
symbols = layoutrom.parseObjDump(objinfofile, 'in')[1]
# Read in raw file
f = open(rawfile, 'rb')
rawdata = f.read()
f.close()
datasize = len(rawdata)
finalsize = int(finalsize) * 1024
if finalsize == 0:
finalsize = 64*1024
if datasize > 64*1024:
finalsize = 128*1024
if datasize > 128*1024:
finalsize = 256*1024
if datasize > finalsize:
print("Error! ROM doesn't fit (%d > %d)" % (datasize, finalsize))
print(" You have to either increase the size (CONFIG_ROM_SIZE)")
print(" or turn off some features (such as hardware support not")
print(" needed) to make it fit. Trying a more recent gcc version")
print(" might work too.")
sys.exit(1)
# Sanity checks
start = symbols['code32flat_start'].offset
end = symbols['code32flat_end'].offset
expend = layoutrom.BUILD_BIOS_ADDR + layoutrom.BUILD_BIOS_SIZE
if end != expend:
print("Error! Code does not end at 0x%x (got 0x%x)" % (
expend, end))
sys.exit(1)
if datasize > finalsize:
print("Error! Code is too big (0x%x vs 0x%x)" % (
datasize, finalsize))
sys.exit(1)
expdatasize = end - start
if datasize != expdatasize:
print("Error! Unknown extra data (0x%x vs 0x%x)" % (
datasize, expdatasize))
sys.exit(1)
# Fix up CSM Compatibility16 table
if 'csm_compat_table' in symbols and 'entry_csm' in symbols:
# Field offsets within EFI_COMPATIBILITY16_TABLE
ENTRY_FIELD_OFS = 14 # Compatibility16CallOffset (UINT16)
SIZE_FIELD_OFS = 5 # TableLength (UINT8)
CSUM_FIELD_OFS = 4 # TableChecksum (UINT8)
tableofs = symbols['csm_compat_table'].offset - symbols['code32flat_start'].offset
entry_addr = symbols['entry_csm'].offset - layoutrom.BUILD_BIOS_ADDR
entry_addr = struct.pack('<H', entry_addr)
rawdata = subst(rawdata, tableofs+ENTRY_FIELD_OFS, entry_addr)
tsfield = tableofs+SIZE_FIELD_OFS
tablesize = ord(rawdata[tsfield:tsfield+1])
rawdata = checksum(rawdata, tableofs, tablesize, CSUM_FIELD_OFS)
# Print statistics
runtimesize = end - symbols['code32init_end'].offset
print("Total size: %d Fixed: %d Free: %d (used %.1f%% of %dKiB rom)" % (
datasize, runtimesize, finalsize - datasize
, (datasize / float(finalsize)) * 100.0
, int(finalsize / 1024)))
# Write final file
f = open(outfile, 'wb')
f.write((as_bytes("\0") * (finalsize - datasize)) + rawdata)
f.close()
if __name__ == '__main__':
main()
| 34.864583 | 90 | 0.63161 |
aced5e978159a373f43c6541114f789743ca2d34 | 469 | py | Python | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/botbuilder/dialogs/about.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/botbuilder/dialogs/about.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/botbuilder/dialogs/about.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
__title__ = "botbuilder-dialogs"
__version__ = (
os.environ["packageVersion"] if "packageVersion" in os.environ else "4.7.1"
)
__uri__ = "https://www.github.com/Microsoft/botbuilder-python"
__author__ = "Microsoft"
__description__ = "Microsoft Bot Framework Bot Builder"
__summary__ = "Microsoft Bot Framework Bot Builder SDK for Python."
__license__ = "MIT"
| 31.266667 | 79 | 0.761194 |
aced5ee8aa0258b9f3a6c53ef941a1e05e63f0fb | 290 | py | Python | database.py | marwin-ko/python | c63b5342efd4a1989cdb0874d983d4e6c5ff17f6 | [
"MIT"
] | null | null | null | database.py | marwin-ko/python | c63b5342efd4a1989cdb0874d983d4e6c5ff17f6 | [
"MIT"
] | null | null | null | database.py | marwin-ko/python | c63b5342efd4a1989cdb0874d983d4e6c5ff17f6 | [
"MIT"
] | null | null | null | """
Author:
Date Created:
Date Last Modified:
Python Version:
"""
import psycopg2
import pandas as pd
# set these variables: host, database, user, password, port (if necessary), and query
conn = psycopg2.connect(host=host, database=database, user=user, password=password, port=port)
df = pd.read_sql_query(query, conn)
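# Illustrative values for the template variables above (not from the original):
#   host, database, user, password, port = "localhost", "mydb", "me", "secret", 5432
#   query = "SELECT * FROM my_table LIMIT 10"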
| 20.714286 | 74 | 0.703448 |
aced5faa0675d8307b6a50dafff4193cfeb4567b | 355 | py | Python | scripts/figures/figure8/pipeswitch_resnet152/remote_run_data.py | CcTtry/PipeSwitch | c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e | [
"Apache-2.0"
] | null | null | null | scripts/figures/figure8/pipeswitch_resnet152/remote_run_data.py | CcTtry/PipeSwitch | c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e | [
"Apache-2.0"
] | null | null | null | scripts/figures/figure8/pipeswitch_resnet152/remote_run_data.py | CcTtry/PipeSwitch | c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e | [
"Apache-2.0"
] | null | null | null | import os
import sys
from scripts.common.util import RunDocker
def main():
with RunDocker('pipeswitch:pipeswitch', 'figure8_pipeswitch_resnet152') as rd:
# Start the server: pipeswitch
rd.run('python PipeSwitch/scripts/run_data.py')
# Get and return the data point
if __name__ == '__main__':
main() | 25.357143 | 83 | 0.656338 |
aced5faebedfba8a97772cfd7aaf9fcf1785cdea | 1,235 | py | Python | configs/_base_/models/shufflenet_ssd.py | TangChangcheng/mmdetection | 24ac8eee3c4b961f1e920c7d7966ad39e2d65969 | [
"Apache-2.0"
] | 1 | 2021-01-14T03:17:02.000Z | 2021-01-14T03:17:02.000Z | configs/_base_/models/shufflenet_ssd.py | TangChangcheng/mmdetection | 24ac8eee3c4b961f1e920c7d7966ad39e2d65969 | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/shufflenet_ssd.py | TangChangcheng/mmdetection | 24ac8eee3c4b961f1e920c7d7966ad39e2d65969 | [
"Apache-2.0"
] | null | null | null | # model settings
input_size = 300
model = dict(
type='SingleStageDetector',
backbone=dict(
type='ShuffleNet',
net_name='shufflenetv2_x1.0',
head_type='SSD'
),
neck=None,
bbox_head=dict(
type='SSDHead',
in_channels=(232, 1024, 512, 256, 256, 256),
num_classes=80,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[16, 32, 64, 100, 150, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
cudnn_benchmark = True
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200)
| 27.444444 | 60 | 0.557895 |
aced5fbd0ae4d1ea1874b10f2033f079cc8c1ee5 | 2,221 | py | Python | bokeh/sampledata/us_holidays.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2015-01-31T14:42:39.000Z | 2015-01-31T14:42:39.000Z | bokeh/sampledata/us_holidays.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T10:14:45.000Z | 2021-05-12T10:14:45.000Z | bokeh/sampledata/us_holidays.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2020-01-21T12:03:58.000Z | 2020-01-21T12:03:58.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
https://www.mozilla.org/en-US/projects/calendar/holidays/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.dependencies import import_required
from ..util.sampledata import package_path
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'us_holidays',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    '''Read the packaged USHolidays.ics calendar and return a sorted list
    of (date, holiday name) tuples, one per VEVENT.
    '''
ic = import_required('icalendar', "us_holidays data requires icalendar (http://icalendar.readthedocs.org) to be installed")
with open(package_path("USHolidays.ics")) as f:
data = ic.Calendar.from_ical(f.read())
return sorted((comp.get("dtstart").dt, str(comp.get("summary"))) for comp in data.walk() if comp.name == "VEVENT")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
us_holidays = _read_data()
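# Each entry is a (date, summary) pair sorted by date, e.g.
# (datetime.date(2020, 1, 1), "New Year's Day") (illustrative values).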
| 35.253968 | 127 | 0.302116 |
aced5ffd888543e4e3ee7495647f9fb8b3414d47 | 3,212 | py | Python | lluvia/python/src/lluvia/util.py | jadarve/lluvia | f4430f7993c0111a03640f686e4d94b8ecc0efc0 | [
"Apache-2.0"
] | 38 | 2018-05-30T16:10:20.000Z | 2022-03-09T20:18:33.000Z | lluvia/python/src/lluvia/util.py | jadarve/lluvia | f4430f7993c0111a03640f686e4d94b8ecc0efc0 | [
"Apache-2.0"
] | 62 | 2018-02-13T04:26:20.000Z | 2022-02-25T23:32:19.000Z | lluvia/python/src/lluvia/util.py | jadarve/lluvia | f4430f7993c0111a03640f686e4d94b8ecc0efc0 | [
"Apache-2.0"
] | 8 | 2020-05-11T05:47:16.000Z | 2022-02-14T05:31:19.000Z | """
lluvia.util
-----------
:copyright: 2018, Juan David Adarve Bermudez. See AUTHORS for more details.
:license: Apache-2 license, see LICENSE for more details.
"""
import math
import os
import glob
import imageio
import numpy as np
__all__ = [
'calculateGridSize',
'loadNodes',
'readRGBA',
'readSampleImage'
]
def loadNodes(session, glslPath, luaPath):
"""
Load GLSL and Lua nodes into a session.
Parameters
----------
session : Session.
Session where the nodes will be loaded.
glslPath : str.
Path where the SPIR-V files can be found.
luaPath : std.
Path where the Lua files can be found.
"""
shaders = glob.glob(os.path.join(glslPath, '*.spv'))
luas = glob.glob(os.path.join(luaPath, '*.lua'))
for spirv in shaders:
fname = os.path.split(spirv)[-1]
programName = os.path.splitext(fname)[0]
session.setProgram(programName, session.createProgram(spirv))
for lua in luas:
session.scriptFile(lua)
def readRGBA(path):
"""
Reads an image and converts it to RGBA.
Parameters
----------
path : str.
Image path.
Returns
-------
RGBA : np.ndarray.
RGBA image.
"""
img = imageio.imread(path)
RGBA = np.zeros(img.shape[:-1] + tuple([4]), dtype=img.dtype)
RGBA[..., :3] = img
return RGBA
def readSampleImage(name):
"""
Reads a sample image packed with Lluvia.
The available images are:
* mouse
* koala
Parameters
----------
name : str
The name of the image
Returns
-------
RGBA : np.ndarray
RGBA image
"""
# From https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
import lluvia.resources as rsc
arr = pkg_resources.read_binary(rsc, name + '.jpg')
imreader = imageio.get_reader(arr, '.jpg')
img = imreader.get_data(0)
RGBA = np.zeros(img.shape[:-1] + tuple([4]), dtype=img.dtype)
RGBA[..., :3] = img
return RGBA
def calculateGridSize(local, imgShape):
"""
Calculates the grid size of a compute node given its local size
and an image parameter.
Parameters
----------
local : 3-int tuple.
local size of the compute node.
imgShape : 4-int tuple.
Image shape in (depth, height, width, channels) format.
Returns
-------
grid : 3-int tuple.
(X, Y, Z) grid size.
"""
x, y, z = local
depth, height, width, _ = imgShape
return (__calculateGridAxis(width, x),
__calculateGridAxis(height, y),
__calculateGridAxis(depth, z))
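# Worked example (illustrative): for a single 480x640 RGBA image, i.e.
# imgShape == (1, 480, 640, 4), and local == (8, 8, 1), the result is
# (ceil(640/8), ceil(480/8), ceil(1/1)) == (80, 60, 1).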
def __calculateGridAxis(length, local):
"""
Calculates the grid axis.
Parameters
----------
length : int.
Global length.
local : int.
Local size.
Returns
-------
grid : int
Calculated grid size.
"""
return int(math.ceil(float(length) / float(local)))
| 20.458599 | 109 | 0.592777 |
aced603af2cf188abe80c0629c5c1d288cb83d6b | 1,729 | py | Python | awxkit/awxkit/api/mixins/has_notifications.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 17 | 2021-04-03T01:40:17.000Z | 2022-03-03T11:45:20.000Z | awxkit/awxkit/api/mixins/has_notifications.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 24 | 2021-05-18T21:13:35.000Z | 2022-03-29T10:23:52.000Z | awxkit/awxkit/api/mixins/has_notifications.py | hostinger/awx | dac01b14e2c04c201a162ea03ef8386d822e3923 | [
"Apache-2.0"
] | 24 | 2020-11-27T08:37:35.000Z | 2021-03-08T13:27:15.000Z | from awxkit.utils import suppress
import awxkit.exceptions as exc
notification_endpoints = ("notification_templates", "notification_templates_started", "notification_templates_error",
"notification_templates_success")
wfjt_notification_endpoints = notification_endpoints + ('notification_templates_approvals',)
class HasNotifications(object):
def add_notification_template(self, notification_template, endpoint="notification_templates_success"):
from awxkit.api.pages.workflow_job_templates import WorkflowJobTemplate
supported_endpoints = wfjt_notification_endpoints if isinstance(self, WorkflowJobTemplate) \
else notification_endpoints
if endpoint not in supported_endpoints:
raise ValueError('Unsupported notification endpoint "{0}". Please use one of {1}.'
.format(endpoint, notification_endpoints))
with suppress(exc.NoContent):
self.related[endpoint].post(dict(id=notification_template.id))
def remove_notification_template(self, notification_template, endpoint="notification_templates_success"):
from awxkit.api.pages.workflow_job_templates import WorkflowJobTemplate
supported_endpoints = wfjt_notification_endpoints if isinstance(self, WorkflowJobTemplate) \
else notification_endpoints
if endpoint not in supported_endpoints:
raise ValueError('Unsupported notification endpoint "{0}". Please use one of {1}.'
.format(endpoint, notification_endpoints))
with suppress(exc.NoContent):
self.related[endpoint].post(dict(id=notification_template.id, disassociate=notification_template.id))
| 55.774194 | 117 | 0.737999 |
aced605c2249901de5c5a502f1ce9bd8d81f6261 | 37,534 | py | Python | .buildozer/android/platform/build-armeabi-v7a/build/python-installs/mydatabase/kivy/lang/builder.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 6 | 2020-04-10T14:36:25.000Z | 2021-04-25T13:11:32.000Z | .buildozer/android/platform/build-armeabi-v7a/build/python-installs/mydatabase/kivy/lang/builder.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 6 | 2020-01-31T18:04:48.000Z | 2021-06-05T10:53:55.000Z | .buildozer/android/platform/build-armeabi-v7a/build/python-installs/mydatabase/kivy/lang/builder.py | VPetras/mobile-test-app | 6708dade6873ae2fb1ecb13aa70662f95fb42dc6 | [
"MIT"
] | 1 | 2021-04-08T19:51:07.000Z | 2021-04-08T19:51:07.000Z | '''
Builder
=======
Class used for the registering and application of rules for specific widgets.
'''
import codecs
import sys
import types
from os import environ
from os.path import join
from copy import copy
from types import CodeType
from functools import partial
from kivy.factory import Factory
from kivy.lang.parser import Parser, ParserException, _handlers, global_idmap,\
ParserRuleProperty
from kivy.logger import Logger
from kivy.utils import QueryDict
from kivy.cache import Cache
from kivy import kivy_data_dir
from kivy.compat import PY2, iteritems, iterkeys
from kivy.context import register_context
from kivy.resources import resource_find
from kivy._event import Observable, EventDispatcher
__all__ = ('Observable', 'Builder', 'BuilderBase', 'BuilderException')
trace = Logger.trace
# class types to check with isinstance
if PY2:
_cls_type = (type, types.ClassType)
else:
_cls_type = (type, )
# late import
Instruction = None
# Delayed calls are canvas expressions triggered during a loop. It is a one-
# directional linked list of args to call call_fn with. Each element is a list
# whose last element points to the next list of args to execute when
# Builder.sync is called.
_delayed_start = None
class BuilderException(ParserException):
'''Exception raised when the Builder failed to apply a rule on a widget.
'''
pass
def get_proxy(widget):
try:
return widget.proxy_ref
except AttributeError:
return widget
def custom_callback(__kvlang__, idmap, *largs, **kwargs):
idmap['args'] = largs
exec(__kvlang__.co_value, idmap)
def call_fn(args, instance, v):
element, key, value, rule, idmap = args
if __debug__:
trace('Lang: call_fn %s, key=%s, value=%r, %r' % (
element, key, value, rule.value))
rule.count += 1
e_value = eval(value, idmap)
if __debug__:
trace('Lang: call_fn => value=%r' % (e_value, ))
setattr(element, key, e_value)
def delayed_call_fn(args, instance, v):
# it's already on the list
if args[-1] is not None:
return
global _delayed_start
if _delayed_start is None:
_delayed_start = args
args[-1] = StopIteration
else:
args[-1] = _delayed_start
_delayed_start = args
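# Note: this makes the pending list a LIFO stack -- each new entry's last slot
# points at the previous head, and Builder.sync() walks the chain until it hits
# the StopIteration sentinel stored by the first queued entry.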
def update_intermediates(base, keys, bound, s, fn, args, instance, value):
''' Function that is called when an intermediate property is updated
and `rebind` of that property is True. In that case, we unbind
all bound funcs that were bound to attrs of the old value of the
property and rebind to the new value of the property.
For example, if the rule is `self.a.b.c.d`, then when b is changed, we
unbind from `b`, `c` and `d`, if they were bound before (they were not
None and `rebind` of the respective properties was True) and we rebind
to the new values of the attrs `b`, `c``, `d` that are not None and
`rebind` is True.
:Parameters:
`base`
A (proxied) ref to the base widget, `self` in the example
above.
`keys`
            A list of the names of the attrs of `base` being watched. In
the example above it'd be `['a', 'b', 'c', 'd']`.
`bound`
            A list of 4-tuples, each tuple being (widget, attr, callback, uid)
            representing callback functions bound to the attribute `attr`
of `widget`. `uid` is returned by `fbind` when binding.
The callback may be None, in which case the attr
was not bound, but is there to be able to walk the attr tree.
E.g. in the example above, if `b` was not an eventdispatcher,
`(_b_ref_, `c`, None)` would be added to the list so we can get
to `c` and `d`, which may be eventdispatchers and their attrs.
`s`
            The index in `keys` of the attr that needs to be
updated. That is all the keys from `s` and further will be
rebound, since the `s` key was changed. In bound, the
corresponding index is `s - 1`. If `s` is None, we start from
1 (first attr).
`fn`
The function to be called args, `args` on bound callback.
'''
# first remove all the old bound functions from `s` and down.
for f, k, fun, uid in bound[s:]:
if fun is None:
continue
try:
f.unbind_uid(k, uid)
except ReferenceError:
pass
del bound[s:]
# find the first attr from which we need to start rebinding.
f = getattr(*bound[-1][:2])
if f is None:
fn(args, None, None)
return
s += 1
append = bound.append
# bind all attrs, except last to update_intermediates
for val in keys[s:-1]:
        # if we need to dynamically rebind, bind; otherwise just
# add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, s, fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
else:
append([f.proxy_ref, val, None, None])
else:
append([getattr(f, 'proxy_ref', f), val, None, None])
f = getattr(f, val, None)
if f is None:
break
s += 1
# for the last attr we bind directly to the setting function,
# because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args)
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
# when we rebind we have to update the
# rule with the most recent value, otherwise, the value might be wrong
# and wouldn't be updated since we might not have tracked it before.
# This only happens for a callback when rebind was True for the prop.
fn(args, None, None)
def create_handler(iself, element, key, value, rule, idmap, delayed=False):
idmap = copy(idmap)
idmap.update(global_idmap)
idmap['self'] = iself.proxy_ref
bound_list = _handlers[iself.uid][key]
handler_append = bound_list.append
# we need a hash for when delayed, so we don't execute duplicate canvas
# callbacks from the same handler during a sync op
if delayed:
fn = delayed_call_fn
args = [element, key, value, rule, idmap, None] # see _delayed_start
else:
fn = call_fn
args = (element, key, value, rule, idmap)
# bind every key.value
if rule.watched_keys is not None:
for keys in rule.watched_keys:
base = idmap.get(keys[0])
if base is None:
continue
f = base = getattr(base, 'proxy_ref', base)
bound = []
was_bound = False
append = bound.append
# bind all attrs, except last to update_intermediates
k = 1
for val in keys[1:-1]:
                # if we need to dynamically rebind, bind; otherwise
# just add the attr to the list
if isinstance(f, (EventDispatcher, Observable)):
prop = f.property(val, True)
if prop is not None and getattr(prop, 'rebind', False):
# fbind should not dispatch, otherwise
# update_intermediates might be called in the middle
# here messing things up
uid = f.fbind(
val, update_intermediates, base, keys, bound, k,
fn, args)
append([f.proxy_ref, val, update_intermediates, uid])
was_bound = True
else:
append([f.proxy_ref, val, None, None])
elif not isinstance(f, _cls_type):
append([getattr(f, 'proxy_ref', f), val, None, None])
else:
append([f, val, None, None])
f = getattr(f, val, None)
if f is None:
break
k += 1
# for the last attr we bind directly to the setting
# function, because that attr sets the value of the rule.
if isinstance(f, (EventDispatcher, Observable)):
uid = f.fbind(keys[-1], fn, args) # f is not None
if uid:
append([f.proxy_ref, keys[-1], fn, uid])
was_bound = True
if was_bound:
handler_append(bound)
try:
return eval(value, idmap), bound_list
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__, e),
cause=tb)
class BuilderBase(object):
'''The Builder is responsible for creating a :class:`Parser` for parsing a
kv file, merging the results into its internal rules, templates, etc.
By default, :class:`Builder` is a global Kivy instance used in widgets
that you can use to load other kv files in addition to the default ones.
'''
_match_cache = {}
_match_name_cache = {}
def __init__(self):
super(BuilderBase, self).__init__()
self.files = []
self.dynamic_classes = {}
self.templates = {}
self.rules = []
self.rulectx = {}
def load_file(self, filename, **kwargs):
'''Insert a file into the language builder and return the root widget
(if defined) of the kv file.
:parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
'''
filename = resource_find(filename) or filename
if __debug__:
trace('Lang: load file %s' % filename)
with open(filename, 'r') as fd:
kwargs['filename'] = filename
data = fd.read()
# remove bom ?
if PY2:
if data.startswith((codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)):
raise ValueError('Unsupported UTF16 for kv files.')
if data.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
raise ValueError('Unsupported UTF32 for kv files.')
if data.startswith(codecs.BOM_UTF8):
data = data[len(codecs.BOM_UTF8):]
return self.load_string(data, **kwargs)
def unload_file(self, filename):
'''Unload all rules associated with a previously imported file.
.. versionadded:: 1.0.8
.. warning::
This will not remove rules or templates already applied/used on
current widgets. It will only effect the next widgets creation or
template invocation.
'''
# remove rules and templates
filename = resource_find(filename) or filename
self.rules = [x for x in self.rules if x[1].ctx.filename != filename]
self._clear_matchcache()
templates = {}
for x, y in self.templates.items():
if y[2] != filename:
templates[x] = y
self.templates = templates
if filename in self.files:
self.files.remove(filename)
# unregister all the dynamic classes
Factory.unregister_from_filename(filename)
def load_string(self, string, **kwargs):
'''Insert a string into the Language Builder and return the root widget
(if defined) of the kv string.
:Parameters:
`rulesonly`: bool, defaults to False
If True, the Builder will raise an exception if you have a root
widget inside the definition.
`filename`: str, defaults to None
If specified, the filename used to index the kv rules.
The filename parameter can be used to unload kv strings in the same way
as you unload kv files. This can be achieved using pseudo file names
e.g.::
            Builder.load_string("""
<MyRule>:
Label:
text="Hello"
""", filename="myrule.kv")
can be unloaded via::
            Builder.unload_file("myrule.kv")
'''
kwargs.setdefault('rulesonly', False)
self._current_filename = fn = kwargs.get('filename', None)
# put a warning if a file is loaded multiple times
if fn in self.files:
Logger.warning(
                'Lang: The file {} is loaded multiple times, '
                'you might observe unwanted behavior.'.format(fn))
try:
# parse the string
parser = Parser(content=string, filename=fn)
# merge rules with our rules
self.rules.extend(parser.rules)
self._clear_matchcache()
# add the template found by the parser into ours
for name, cls, template in parser.templates:
self.templates[name] = (cls, template, fn)
Factory.register(name,
cls=partial(self.template, name),
is_template=True, warn=True)
# register all the dynamic classes
for name, baseclasses in iteritems(parser.dynamic_classes):
Factory.register(name, baseclasses=baseclasses, filename=fn,
warn=True)
            # create the root object if one exists
            if kwargs['rulesonly'] and parser.root:
                filename = kwargs.get('filename', '<string>')
                raise Exception('The file <%s> also contains non-rules '
'directives' % filename)
# save the loaded files only if there is a root without
# template/dynamic classes
if fn and (parser.templates or
parser.dynamic_classes or parser.rules):
self.files.append(fn)
if parser.root:
widget = Factory.get(parser.root.name)(__no_builder=True)
rule_children = []
widget.apply_class_lang_rules(
root=widget, rule_children=rule_children)
self._apply_rule(
widget, parser.root, parser.root,
rule_children=rule_children)
for child in rule_children:
child.dispatch('on_kv_post', widget)
widget.dispatch('on_kv_post', widget)
return widget
finally:
self._current_filename = None
def template(self, *args, **ctx):
'''Create a specialized template using a specific context.
.. versionadded:: 1.0.5
With templates, you can construct custom widgets from a kv lang
definition by giving them a context. Check :ref:`Template usage
<template_usage>`.
'''
# Prevent naming clash with whatever the user might be putting into the
# ctx as key.
name = args[0]
if name not in self.templates:
raise Exception('Unknown <%s> template name' % name)
baseclasses, rule, fn = self.templates[name]
key = '%s|%s' % (name, baseclasses)
cls = Cache.get('kv.lang', key)
if cls is None:
rootwidgets = []
for basecls in baseclasses.split('+'):
rootwidgets.append(Factory.get(basecls))
cls = type(name, tuple(rootwidgets), {})
Cache.append('kv.lang', key, cls)
widget = cls()
# in previous versions, ``ctx`` is passed as is as ``template_ctx``
# preventing widgets in it from be collected by the GC. This was
# especially relevant to AccordionItem's title_template.
proxy_ctx = {k: get_proxy(v) for k, v in ctx.items()}
self._apply_rule(widget, rule, rule, template_ctx=proxy_ctx)
return widget
def apply_rules(
self, widget, rule_name, ignored_consts=set(), rule_children=None,
dispatch_kv_post=False):
'''Search all the rules that match the name `rule_name`
and apply them to `widget`.
.. versionadded:: 1.10.0
:Parameters:
`widget`: :class:`~kivy.uix.widget.Widget`
The widget to whom the matching rules should be applied to.
`ignored_consts`: set
A set or list type whose elements are property names for which
constant KV rules (i.e. those that don't create bindings) of
that widget will not be applied. This allows e.g. skipping
constant rules that overwrite a value initialized in python.
`rule_children`: list
If not ``None``, it should be a list that will be populated
with all the widgets created by the kv rules being applied.
.. versionchanged:: 1.11.0
`dispatch_kv_post`: bool
Normally the class `Widget` dispatches the `on_kv_post` event
to widgets created during kv rule application.
But if the rules are manually applied by calling :meth:`apply`,
that may not happen, so if this is `True`, we will dispatch the
`on_kv_post` event where needed after applying the rules to
`widget` (we won't dispatch it for `widget` itself).
Defaults to False.
.. versionchanged:: 1.11.0
'''
rules = self.match_rule_name(rule_name)
if __debug__:
trace('Lang: Found %d rules for %s' % (len(rules), rule_name))
if not rules:
return
if dispatch_kv_post:
rule_children = rule_children if rule_children is not None else []
for rule in rules:
self._apply_rule(
widget, rule, rule, ignored_consts=ignored_consts,
rule_children=rule_children)
if dispatch_kv_post:
for w in rule_children:
w.dispatch('on_kv_post', widget)
def apply(self, widget, ignored_consts=set(), rule_children=None,
dispatch_kv_post=False):
'''Search all the rules that match the widget and apply them.
:Parameters:
`widget`: :class:`~kivy.uix.widget.Widget`
The widget whose class rules should be applied to this widget.
`ignored_consts`: set
A set or list type whose elements are property names for which
constant KV rules (i.e. those that don't create bindings) of
that widget will not be applied. This allows e.g. skipping
constant rules that overwrite a value initialized in python.
`rule_children`: list
If not ``None``, it should be a list that will be populated
with all the widgets created by the kv rules being applied.
.. versionchanged:: 1.11.0
`dispatch_kv_post`: bool
Normally the class `Widget` dispatches the `on_kv_post` event
to widgets created during kv rule application.
But if the rules are manually applied by calling :meth:`apply`,
that may not happen, so if this is `True`, we will dispatch the
`on_kv_post` event where needed after applying the rules to
`widget` (we won't dispatch it for `widget` itself).
Defaults to False.
.. versionchanged:: 1.11.0
'''
rules = self.match(widget)
if __debug__:
trace('Lang: Found %d rules for %s' % (len(rules), widget))
if not rules:
return
if dispatch_kv_post:
rule_children = rule_children if rule_children is not None else []
for rule in rules:
self._apply_rule(
widget, rule, rule, ignored_consts=ignored_consts,
rule_children=rule_children)
if dispatch_kv_post:
for w in rule_children:
w.dispatch('on_kv_post', widget)
def _clear_matchcache(self):
BuilderBase._match_cache = {}
BuilderBase._match_name_cache = {}
def _apply_rule(self, widget, rule, rootrule, template_ctx=None,
ignored_consts=set(), rule_children=None):
# widget: the current instantiated widget
# rule: the current rule
# rootrule: the current root rule (for children of a rule)
# will collect reference to all the id in children
assert(rule not in self.rulectx)
self.rulectx[rule] = rctx = {
'ids': {'root': widget.proxy_ref},
'set': [], 'hdl': []}
# extract the context of the rootrule (not rule!)
assert(rootrule in self.rulectx)
rctx = self.rulectx[rootrule]
# if a template context is passed, put it as "ctx"
if template_ctx is not None:
rctx['ids']['ctx'] = QueryDict(template_ctx)
# if we got an id, put it in the root rule for a later global usage
if rule.id:
# use only the first word as `id` discard the rest.
rule.id = rule.id.split('#', 1)[0].strip()
rctx['ids'][rule.id] = widget.proxy_ref
# set id name as a attribute for root widget so one can in python
# code simply access root_widget.id_name
_ids = dict(rctx['ids'])
_root = _ids.pop('root')
_new_ids = _root.ids
for _key in iterkeys(_ids):
if _ids[_key] == _root:
# skip on self
continue
_new_ids[_key] = _ids[_key]
_root.ids = _new_ids
# first, ensure that the widget have all the properties used in
# the rule if not, they will be created as ObjectProperty.
rule.create_missing(widget)
# build the widget canvas
if rule.canvas_before:
with widget.canvas.before:
self._build_canvas(widget.canvas.before, widget,
rule.canvas_before, rootrule)
if rule.canvas_root:
with widget.canvas:
self._build_canvas(widget.canvas, widget,
rule.canvas_root, rootrule)
if rule.canvas_after:
with widget.canvas.after:
self._build_canvas(widget.canvas.after, widget,
rule.canvas_after, rootrule)
# create children tree
Factory_get = Factory.get
Factory_is_template = Factory.is_template
for crule in rule.children:
cname = crule.name
if cname in ('canvas', 'canvas.before', 'canvas.after'):
raise ParserException(
crule.ctx, crule.line,
'Canvas instructions added in kv must '
'be declared before child widgets.')
# depending if the child rule is a template or not, we are not
# having the same approach
cls = Factory_get(cname)
if Factory_is_template(cname):
# we got a template, so extract all the properties and
# handlers, and push them in a "ctx" dictionary.
ctx = {}
idmap = copy(global_idmap)
idmap.update({'root': rctx['ids']['root']})
if 'ctx' in rctx['ids']:
idmap.update({'ctx': rctx['ids']['ctx']})
try:
for prule in crule.properties.values():
value = prule.co_value
if type(value) is CodeType:
value = eval(value, idmap)
ctx[prule.name] = value
for prule in crule.handlers:
value = eval(prule.value, idmap)
ctx[prule.name] = value
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
# create the template with an explicit ctx
child = cls(**ctx)
widget.add_widget(child)
# reference it on our root rule context
if crule.id:
rctx['ids'][crule.id] = child
else:
# we got a "normal" rule, construct it manually
# we can't construct it without __no_builder=True, because the
# previous implementation was doing the add_widget() before
# apply(), and so, we could use "self.parent".
child = cls(__no_builder=True)
widget.add_widget(child)
child.apply_class_lang_rules(
root=rctx['ids']['root'], rule_children=rule_children)
self._apply_rule(
child, crule, rootrule, rule_children=rule_children)
if rule_children is not None:
rule_children.append(child)
# append the properties and handlers to our final resolution task
if rule.properties:
rctx['set'].append((widget.proxy_ref,
list(rule.properties.values())))
for key, crule in rule.properties.items():
# clear previously applied rules if asked
if crule.ignore_prev:
Builder.unbind_property(widget, key)
if rule.handlers:
rctx['hdl'].append((widget.proxy_ref, rule.handlers))
# if we are applying another rule that the root one, then it's done for
# us!
if rootrule is not rule:
del self.rulectx[rule]
return
# normally, we can apply a list of properties with a proper context
try:
rule = None
for widget_set, rules in reversed(rctx['set']):
for rule in rules:
assert(isinstance(rule, ParserRuleProperty))
key = rule.name
value = rule.co_value
if type(value) is CodeType:
value, bound = create_handler(
widget_set, widget_set, key, value, rule,
rctx['ids'])
# if there's a rule
if (widget_set != widget or bound or
key not in ignored_consts):
setattr(widget_set, key, value)
else:
if (widget_set != widget or
key not in ignored_consts):
setattr(widget_set, key, value)
except Exception as e:
if rule is not None:
tb = sys.exc_info()[2]
raise BuilderException(rule.ctx, rule.line,
'{}: {}'.format(e.__class__.__name__,
e), cause=tb)
raise e
# build handlers
try:
crule = None
for widget_set, rules in rctx['hdl']:
for crule in rules:
assert(isinstance(crule, ParserRuleProperty))
assert(crule.name.startswith('on_'))
key = crule.name
if not widget_set.is_event_type(key):
key = key[3:]
idmap = copy(global_idmap)
idmap.update(rctx['ids'])
idmap['self'] = widget_set.proxy_ref
if not widget_set.fbind(key, custom_callback, crule,
idmap):
raise AttributeError(key)
# hack for on_parent
if crule.name == 'on_parent':
Factory.Widget.parent.dispatch(widget_set.__self__)
except Exception as e:
if crule is not None:
tb = sys.exc_info()[2]
raise BuilderException(
crule.ctx, crule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
raise e
# rule finished, forget it
del self.rulectx[rootrule]
def match(self, widget):
'''Return a list of :class:`ParserRule` objects matching the widget.
'''
cache = BuilderBase._match_cache
k = (widget.__class__, tuple(widget.cls))
if k in cache:
return cache[k]
rules = []
for selector, rule in self.rules:
if selector.match(widget):
if rule.avoid_previous_rules:
del rules[:]
rules.append(rule)
cache[k] = rules
return rules
def match_rule_name(self, rule_name):
'''Return a list of :class:`ParserRule` objects matching the widget.
'''
cache = BuilderBase._match_name_cache
rule_name = str(rule_name)
k = rule_name.lower()
if k in cache:
return cache[k]
rules = []
for selector, rule in self.rules:
if selector.match_rule_name(rule_name):
if rule.avoid_previous_rules:
del rules[:]
rules.append(rule)
cache[k] = rules
return rules
def sync(self):
'''Execute all the waiting operations, such as the execution of all the
expressions related to the canvas.
.. versionadded:: 1.7.0
'''
global _delayed_start
next_args = _delayed_start
if next_args is None:
return
while next_args is not StopIteration:
# is this try/except still needed? yes, in case widget died in this
# frame after the call was scheduled
try:
call_fn(next_args[:-1], None, None)
except ReferenceError:
pass
args = next_args
next_args = args[-1]
args[-1] = None
_delayed_start = None
def unbind_widget(self, uid):
'''Unbind all the handlers created by the KV rules of the
widget. The :attr:`kivy.uix.widget.Widget.uid` is passed here
instead of the widget itself, because Builder is using it in the
widget destructor.
This effectively clears all the KV rules associated with this widget.
For example:
.. code-block:: python
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_widget(w.uid)
>>> w.width = 222
>>> w.y = 500
>>> w.size
[222, 500]
>>> w.pos
[50, 500]
.. versionadded:: 1.7.2
'''
if uid not in _handlers:
return
for prop_callbacks in _handlers[uid].values():
for callbacks in prop_callbacks:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del _handlers[uid]
def unbind_property(self, widget, name):
'''Unbind the handlers created by all the rules of the widget that set
the name.
This effectively clears all the rules of widget that take the form::
name: rule
For example:
.. code-block:: python
>>> w = Builder.load_string(\'''
... Widget:
... height: self.width / 2. if self.disabled else self.width
... x: self.y + 50
... \''')
>>> w.size
[100, 100]
>>> w.pos
[50, 0]
>>> w.width = 500
>>> w.size
[500, 500]
>>> Builder.unbind_property(w, 'height')
>>> w.width = 222
>>> w.size
[222, 500]
>>> w.y = 500
>>> w.pos
[550, 500]
.. versionadded:: 1.9.1
'''
uid = widget.uid
if uid not in _handlers:
return
prop_handlers = _handlers[uid]
if name not in prop_handlers:
return
for callbacks in prop_handlers[name]:
for f, k, fn, bound_uid in callbacks:
if fn is None: # it's not a kivy prop.
continue
try:
f.unbind_uid(k, bound_uid)
except ReferenceError:
# proxy widget is already gone, that's cool :)
pass
del prop_handlers[name]
if not prop_handlers:
del _handlers[uid]
def _build_canvas(self, canvas, widget, rule, rootrule):
global Instruction
if Instruction is None:
Instruction = Factory.get('Instruction')
idmap = copy(self.rulectx[rootrule]['ids'])
for crule in rule.children:
name = crule.name
if name == 'Clear':
canvas.clear()
continue
instr = Factory.get(name)()
if not isinstance(instr, Instruction):
raise BuilderException(
crule.ctx, crule.line,
'You can add only graphics Instruction in canvas.')
try:
for prule in crule.properties.values():
key = prule.name
value = prule.co_value
if type(value) is CodeType:
value, _ = create_handler(
widget, instr.proxy_ref,
key, value, prule, idmap, True)
setattr(instr, key, value)
except Exception as e:
tb = sys.exc_info()[2]
raise BuilderException(
prule.ctx, prule.line,
'{}: {}'.format(e.__class__.__name__, e), cause=tb)
#: Main instance of a :class:`BuilderBase`.
Builder = register_context('Builder', BuilderBase)
Builder.load_file(join(kivy_data_dir, 'style.kv'), rulesonly=True)
if 'KIVY_PROFILE_LANG' in environ:
import atexit
import cgi
def match_rule(fn, index, rule):
if rule.ctx.filename != fn:
return
for prop, prp in iteritems(rule.properties):
if prp.line != index:
continue
yield prp
for child in rule.children:
for r in match_rule(fn, index, child):
yield r
if rule.canvas_root:
for r in match_rule(fn, index, rule.canvas_root):
yield r
if rule.canvas_before:
for r in match_rule(fn, index, rule.canvas_before):
yield r
if rule.canvas_after:
for r in match_rule(fn, index, rule.canvas_after):
yield r
def dump_builder_stats():
html = [
'<!doctype html>'
'<html><body>',
'<style type="text/css">\n',
'pre { margin: 0; }\n',
'</style>']
files = set([x[1].ctx.filename for x in Builder.rules])
for fn in files:
try:
with open(fn) as f:
lines = f.readlines()
except (IOError, TypeError) as e:
continue
html += ['<h2>', fn, '</h2>', '<table>']
count = 0
for index, line in enumerate(lines):
line = line.rstrip()
line = cgi.escape(line)
matched_prp = []
for psn, rule in Builder.rules:
matched_prp += list(match_rule(fn, index, rule))
count = sum(set([x.count for x in matched_prp]))
color = (255, 155, 155) if count else (255, 255, 255)
html += ['<tr style="background-color: rgb{}">'.format(color),
'<td>', str(index + 1), '</td>',
'<td>', str(count), '</td>',
'<td><pre>', line, '</pre></td>',
'</tr>']
html += ['</table>']
html += ['</body></html>']
with open('builder_stats.html', 'w') as fd:
fd.write(''.join(html))
print('Profiling written at builder_stats.html')
atexit.register(dump_builder_stats)
| 37.684739 | 79 | 0.539964 |
aced612602bb0d01a5f5911d0e9a07841a4e3774 | 4,572 | py | Python | train.py | QuynhSU/Transformer-OCR | abfcb78508c5d816ba494343612269642cebfe59 | [
"MIT"
] | 336 | 2020-02-29T13:56:04.000Z | 2022-03-29T14:52:24.000Z | train.py | vedne23/Transformer-OCR | abfcb78508c5d816ba494343612269642cebfe59 | [
"MIT"
] | 21 | 2020-03-19T15:03:17.000Z | 2022-02-18T00:20:47.000Z | train.py | vedne23/Transformer-OCR | abfcb78508c5d816ba494343612269642cebfe59 | [
"MIT"
] | 89 | 2020-03-19T02:09:09.000Z | 2022-03-29T12:39:45.000Z | import torch
import torch.nn as nn
from torch.autograd import Variable
import time
from dataset import ListDataset
from dataset import char2token
from dataset import Batch
from model import make_model
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
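    # Schedule sketch (illustrative numbers): the rate grows linearly for
    # `warmup` steps, then decays as step**-0.5. With factor=1, warmup=2000
    # and model_size=512 (assuming make_model uses d_model=512), the peak
    # rate at step 2000 is 1 * 512**-0.5 * 2000**-0.5 ~= 9.9e-4.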
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size, padding_idx=0, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False)
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
assert x.size(1) == self.size
true_dist = x.data.clone()
true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
true_dist[:, self.padding_idx] = 0
mask = torch.nonzero(target.data == self.padding_idx)
if mask.dim() > 0:
true_dist.index_fill_(0, mask.squeeze(), 0.0)
self.true_dist = true_dist
return self.criterion(x, Variable(true_dist, requires_grad=False))
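        # Worked example (illustrative): with size=5 and smoothing=0.1, a
        # non-padding target t receives probability 0.9 and every other class
        # 0.1/(5-2) ~= 0.033, with the padding column zeroed; rows whose
        # target IS the padding index are zeroed out entirely.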
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data * norm
def run_epoch(dataloader, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = 0
tokens = 0
for i, (imgs, labels_y, labels) in enumerate(dataloader):
batch = Batch(imgs, labels_y, labels)
out = model(batch.imgs, batch.trg, batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens)
total_loss += loss
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss / batch.ntokens, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss / total_tokens
def train():
batch_size = 64
train_dataloader = torch.utils.data.DataLoader(ListDataset(['your-train-lines']), batch_size=batch_size, shuffle=True, num_workers=0)
val_dataloader = torch.utils.data.DataLoader(ListDataset('your-test-lines'), batch_size=batch_size, shuffle=False, num_workers=0)
model = make_model(len(char2token))
model.load_state_dict(torch.load('your-pretrain-model-path'))
model.cuda()
criterion = LabelSmoothing(size=len(char2token), padding_idx=0, smoothing=0.1)
criterion.cuda()
model_opt = NoamOpt(model.tgt_embed[0].d_model, 1, 2000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
for epoch in range(10000):
model.train()
run_epoch(train_dataloader, model,
SimpleLossCompute(model.generator, criterion, model_opt))
model.eval()
test_loss = run_epoch(val_dataloader, model,
SimpleLossCompute(model.generator, criterion, None))
print("test_loss", test_loss)
torch.save(model.state_dict(), 'checkpoint/%08d_%f.pth'%(epoch, test_loss))
if __name__=='__main__':
train()
| 34.636364 | 137 | 0.619204 |
aced61dbc37e0077e80f278d0880b8b7041163e6 | 1,832 | py | Python | setup.py | Darylgolden/jupyter-manim | 38c655f3e04c421effde430ddf6238b28fadc581 | [
"MIT"
] | 193 | 2019-05-28T03:44:23.000Z | 2022-02-16T03:29:33.000Z | setup.py | Darylgolden/jupyter-manim | 38c655f3e04c421effde430ddf6238b28fadc581 | [
"MIT"
] | 28 | 2019-05-25T08:16:07.000Z | 2022-02-21T13:44:06.000Z | setup.py | Darylgolden/jupyter-manim | 38c655f3e04c421effde430ddf6238b28fadc581 | [
"MIT"
] | 25 | 2019-08-20T08:00:48.000Z | 2022-02-03T04:53:18.000Z | from setuptools import setup
from setuptools import find_packages
try:
from pypandoc import convert
def get_long_description(file_name):
return convert(file_name, 'rst', 'md')
except ImportError:
def get_long_description(file_name):
with open(file_name) as f:
return f.read()
if __name__ == '__main__':
setup(
name='jupyter_manim',
packages=find_packages(),
version='1.3',
license='MIT',
description='Cell magic rendering displaying videos in Jupyter/IPython',
long_description=get_long_description('README.md'),
author='Michal Krassowski',
author_email='krassowski.michal+pypi@gmail.com',
url='https://github.com/krassowski/jupyter-manim',
keywords=['jupyter', 'jupyterlab', 'notebook', 'manim', 'manimlib', 'mathematics'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Framework :: IPython',
'Framework :: Jupyter',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Topic :: Utilities',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Video',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'manimlib', 'IPython'
],
)
| 34.566038 | 91 | 0.582424 |
aced62e3ced9e021a585e5261e557a0698ff2d4b | 3,361 | py | Python | 05algstruct/structs/singlecyclinklist.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
] | null | null | null | 05algstruct/structs/singlecyclinklist.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
] | null | null | null | 05algstruct/structs/singlecyclinklist.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
] | null | null | null | """
Singly-linked circular list
"""
class Node:
def __init__(self, elem):
self.elem = elem
self.next = None
class SingleCycLinkList:
def __init__(self):
self._head = None
def is_empty(self):
return self._head is None
    def length(self):
        if self.is_empty():
            return 0
        # the list is circular, so count nodes until we wrap back to head
        # (the original `while cur is not None` loop never terminates here)
        cur = self._head
        count = 1
        while cur.next != self._head:
            count += 1
            cur = cur.next
        return count
def travel(self):
cur = self._head
        # in a circular list the last node's next points back at head, so stop once the next node is head again
while cur is not None:
print(cur.elem)
cur = cur.next
if cur == self._head:
break
def add(self, elem):
node = Node(elem)
        # if the head node is None (empty list)
if self.is_empty():
self._head = node
node.next = self._head
else:
node.next = self._head
cur = self._head
while cur.next != self._head:
cur = cur.next
            # point the old tail's next at the newly added node, keeping the ring closed
cur.next = node
self._head = node
def append(self, elem):
node = Node(elem)
if self.is_empty():
self._head = node
node.next = self._head
else:
cur = self._head
while cur.next != self._head:
cur = cur.next
cur.next = node
            # point the newly appended tail back at the head, closing the ring
node.next = self._head
def insert(self, elem, pos):
if pos <= 0:
self.add(elem)
elif pos > (self.length() - 1):
self.append(elem)
else:
node = Node(elem)
cur = self._head
count = 0
            # walk to the node just before the insertion position
while count < (pos - 1):
count += 1
cur = cur.next
            # hand the predecessor's successor over to the new node
node.next = cur.next
cur.next = node
def remove(self, elem):
if self.is_empty():
return
cur = self._head
pre = None
while cur.next != self._head:
if cur.elem == elem:
                # check whether the node to remove is the head
if cur == self._head:
rear = self._head
while rear.next != self._head:
rear = rear.next
self._head = cur.next
rear.next = self._head
else:
pre.next = cur.next
return
else:
pre = cur
cur = cur.next
if cur.elem == elem:
if cur == self._head:
self._head = None
else:
pre.next = self._head
def search(self, elem):
if self.is_empty():
return False
cur = self._head
        if cur.elem == elem:
return True
while cur.next != self._head:
cur = cur.next
if cur.elem == elem:
return True
return False
if __name__ == '__main__':
ll = SingleCycLinkList()
ll.add(1)
ll.add(2)
ll.append(3)
ll.insert(2, 4)
ll.insert(4, 5)
ll.insert(0, 6)
print("length:", ll.length())
ll.travel()
print(ll.search(3))
print(ll.search(7))
ll.remove(1)
print("length:", ll.length())
ll.travel()
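    # Hand-traced expected output with the fixes above (illustrative):
    #   length: 6, traversal 2 1 3 2 4 0, True, False, length: 5,
    #   traversal 2 3 2 4 0. Note insert() takes (elem, pos), so e.g.
    #   ll.insert(2, 4) appends elem 2 because pos 4 is past the end.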
| 19.205714 | 64 | 0.446593 |
aced62f555a57ff306af5461882a34b871f86afb | 1,179 | py | Python | tests/unit_tests/test_model_evaluation.py | Kylmakalle/clarifai-python | 446ee3e410c409bc80c84d00d2b1a82465b65d95 | [
"Apache-2.0"
] | 322 | 2015-08-25T03:16:11.000Z | 2021-11-08T09:36:50.000Z | tests/unit_tests/test_model_evaluation.py | Kylmakalle/clarifai-python | 446ee3e410c409bc80c84d00d2b1a82465b65d95 | [
"Apache-2.0"
] | 76 | 2015-10-25T13:03:47.000Z | 2022-02-19T09:36:10.000Z | tests/unit_tests/test_model_evaluation.py | Kylmakalle/clarifai-python | 446ee3e410c409bc80c84d00d2b1a82465b65d95 | [
"Apache-2.0"
] | 136 | 2015-09-04T13:48:27.000Z | 2021-06-12T16:48:36.000Z | import unittest.mock as mock
from clarifai.rest import ClarifaiApp, Model
from .mock_extensions import assert_request, mock_request
@mock.patch('clarifai.rest.http_client.HttpClient')
def test_model_evaluate(mock_http_client): # type: (mock.Mock) -> None
mock_execute_request = mock_request(mock_http_client, """
{
"status": {
"code": 10000,
"description": "Ok"
},
"model_version": {
"id": "@modelVersionID",
"created_at": "2017-01-01T00:00:00.000000Z",
"status": {
"code": 21100,
"description": "Model trained successfully"
},
"active_concept_count": 2,
"metrics": {
"status": {
"code": 21303,
"description": "Model is queued for evaluation."
}
},
"total_input_count": 30
}
}
""")
app = ClarifaiApp()
model = Model(app.api, model_id='@modelID')
model.model_version = '@modelVersionID'
response = model.evaluate()
assert response['status']['code'] == 10000
assert_request(mock_execute_request, 'POST',
'/v2/models/@modelID/versions/@modelVersionID/metrics', '{}')
| 26.795455 | 78 | 0.597116 |
aced6328573480d6cae13d7b7500ce02653405cd | 2,109 | py | Python | openinghours/tests/test_settings.py | fmalina/django-openinghours | fe9321aabd8a8d6b36f7c5d96a6dfa63f899244d | [
"MIT"
] | 3 | 2016-02-14T06:57:34.000Z | 2020-03-13T20:40:10.000Z | openinghours/tests/test_settings.py | fmalina/django-openinghours | fe9321aabd8a8d6b36f7c5d96a6dfa63f899244d | [
"MIT"
] | null | null | null | openinghours/tests/test_settings.py | fmalina/django-openinghours | fe9321aabd8a8d6b36f7c5d96a6dfa63f899244d | [
"MIT"
] | 1 | 2020-08-09T19:39:37.000Z | 2020-08-09T19:39:37.000Z | """Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
TIME_ZONE = 'Europe/London'
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'openinghours.db',
}
}
ROOT_URLCONF = 'openinghours.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(APP_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'openinghours',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
# run coverage with
# coverage run --source='.' manage.py test openinghours
# coverage report
SECRET_KEY = 'foobar'
| 27.38961 | 74 | 0.638217 |
aced640db6acbe38dd5fad2ab6bf6d4883deb3f8 | 224 | py | Python | src/notify.py | JakeRoggenbuck/xdotool_python_window_util | 5559b203c2759c183fd1ae6c7a2049fae6776cd3 | [
"MIT"
] | null | null | null | src/notify.py | JakeRoggenbuck/xdotool_python_window_util | 5559b203c2759c183fd1ae6c7a2049fae6776cd3 | [
"MIT"
] | 1 | 2020-07-29T21:01:05.000Z | 2020-07-29T21:01:05.000Z | src/notify.py | JakeRoggenbuck/xdotool_python_window_util | 5559b203c2759c183fd1ae6c7a2049fae6776cd3 | [
"MIT"
] | null | null | null | from main import Move
import matplotlib.pyplot as plt
import numpy as np
Fs = 2000
f = 5
sample = 80
x = np.arange(sample)
y = np.sin(2 * np.pi * f * x * 10 / Fs) * 6
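# Sketch of intent (illustrative; the Move(flag, x, y) signature is assumed
# from its usage below and comes from the sibling `main` module): step the
# window once per sample so it sweeps along the sine wave.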
for a in y:
b = Move(True, a, 0)
b.move_win()
| 14.933333 | 43 | 0.607143 |
aced64295ff802d7336e2ffc61013abeaf69deea | 76 | py | Python | tests/integrations/config/package.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 1,816 | 2018-02-14T01:59:51.000Z | 2022-03-31T17:09:20.000Z | tests/integrations/config/package.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 340 | 2018-02-11T00:27:26.000Z | 2022-03-21T12:00:24.000Z | tests/integrations/config/package.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
] | 144 | 2018-03-18T00:08:16.000Z | 2022-02-26T01:51:58.000Z | """External package settings for tests."""
PACKAGE_PARAM = "package_value"
| 19 | 42 | 0.75 |
aced64ab0c6d315fe59d425b16ca9083b9705843 | 1,390 | py | Python | tests/providers/google/cloud/transfers/test_bigquery_to_bigquery_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | tests/providers/google/cloud/transfers/test_bigquery_to_bigquery_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | tests/providers/google/cloud/transfers/test_bigquery_to_bigquery_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Build operators"""
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_BIGQUERY_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.system("google.cloud")
@pytest.mark.credential_file(GCP_BIGQUERY_KEY)
class BigQueryToBigQueryExampleDagsSystemTest(GoogleSystemTest):
@provide_gcp_context(GCP_BIGQUERY_KEY)
def test_run_example_dag_queries(self):
self.run_dag('example_bigquery_to_bigquery', CLOUD_DAG_FOLDER)
| 43.4375 | 103 | 0.797122 |
aced6540740c9bfeb2cea64f73d9da4c62e094b7 | 2,497 | py | Python | koans/segunda_semana/acerca_de_funciones.py | benjymb/python_koans_pachaqtec | 094265842c2e182874fbf608521dc4a30e66bfae | [
"MIT"
] | null | null | null | koans/segunda_semana/acerca_de_funciones.py | benjymb/python_koans_pachaqtec | 094265842c2e182874fbf608521dc4a30e66bfae | [
"MIT"
] | null | null | null | koans/segunda_semana/acerca_de_funciones.py | benjymb/python_koans_pachaqtec | 094265842c2e182874fbf608521dc4a30e66bfae | [
"MIT"
] | 2 | 2020-04-22T20:28:30.000Z | 2020-04-25T16:22:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def mi_funcion_global(a,b):
return a + b
def funcion_que_no_retorna_nada(a, b):
resultado = a + b
def funcion_con_argumentos_que_tienen_valor_por_defecto(
a, b='Soy el valor por defecto de la variable b'
):
return [a, b]
def funcion_con_cantidad_variable_de_argumentos_no_nombrados(*args):
return args
def funcion_vacia():
"""
    # REMEMBER: the reserved word pass is used to mark
    # empty blocks of code.
"""
pass
def funcion_vacia_que_hace_trampa():
pass
return "XD"
class AcercaDeFunciones(Koan):
def test_llamando_a_funcion_global(self):
self.assertEqual(__, mi_funcion_global(2,3))
"""
    # NOTE: Calling a function with a number of parameters different
    # from what it expects does not raise a SyntaxError.
    # It is a runtime error.
"""
def test_llamando_funciones_con_el_numero_de_parametros_inadecuados(self):
try:
mi_funcion_global()
except TypeError as exception:
mensaje = exception.args[0]
self.assertRegex(mensaje,
r'mi_funcion_global\(\) missing 2 required positional arguments')
try:
mi_funcion_global(1, 2, 3)
except Exception as e:
mensaje = e.args[0]
"""
            # NOTE: Be careful with the parentheses, they need to be escaped:
            # r'\(\)'
"""
self.assertRegex(mensaje, __)
def test_llamando_funcion_que_no_retorna_nada(self):
self.assertEqual(__, funcion_que_no_retorna_nada(1, 2))
def test_llamando_funcion_con_argumentos_que_tienen_valor_por_defecto(self):
self.assertEqual(__, funcion_con_argumentos_que_tienen_valor_por_defecto(1))
self.assertEqual(__, funcion_con_argumentos_que_tienen_valor_por_defecto(1, 2))
def test_llamando_funcion_con_cantidad_variable_de_argumentos_no_nombrados(self):
self.assertEqual(__, funcion_con_cantidad_variable_de_argumentos_no_nombrados())
self.assertEqual(('uno',), funcion_con_cantidad_variable_de_argumentos_no_nombrados('uno'))
self.assertEqual(__, funcion_con_cantidad_variable_de_argumentos_no_nombrados('uno', 'dos'))
def test_llamando_funcion_vacia(self):
self.assertEqual(__, funcion_vacia())
def test_llamando_funcion_vacia_que_hace_trampa(self):
self.assertEqual(____, "XD" != funcion_vacia_que_hace_trampa())
| 30.45122 | 100 | 0.705246 |
aced66250d4ec93e05b2ee2705f65d78d29a9e9c | 11,251 | py | Python | shynet/core/models.py | f97/shynet | 620f8c103a06ab96141533a593a6d48e42d85727 | [
"Apache-2.0"
] | 1 | 2022-02-18T18:08:58.000Z | 2022-02-18T18:08:58.000Z | shynet/core/models.py | f97/shynet | 620f8c103a06ab96141533a593a6d48e42d85727 | [
"Apache-2.0"
] | 3 | 2021-12-08T07:51:20.000Z | 2021-12-21T06:27:27.000Z | shynet/core/models.py | f97/shynet | 620f8c103a06ab96141533a593a6d48e42d85727 | [
"Apache-2.0"
] | null | null | null | import ipaddress
import json
import re
import uuid
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.functions import TruncDate, TruncHour
from django.db.utils import NotSupportedError
from django.shortcuts import reverse
from django.utils import timezone
# How long a session needs to go without an update before it is no longer considered 'active' (i.e., currently online)
ACTIVE_USER_TIMEDELTA = timezone.timedelta(
milliseconds=settings.SCRIPT_HEARTBEAT_FREQUENCY * 2
)
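# Illustrative arithmetic (assuming, for example, SCRIPT_HEARTBEAT_FREQUENCY
# = 5000 ms): a session then counts as 'currently online' for
# 2 * 5000 ms = 10 seconds after its last heartbeat.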
def _default_uuid():
return str(uuid.uuid4())
def _validate_network_list(networks: str):
try:
_parse_network_list(networks)
except ValueError as e:
raise ValidationError(str(e))
def _validate_regex(regex: str):
try:
re.compile(regex)
except re.error:
raise ValidationError(f"'{regex}' is not valid RegEx")
def _parse_network_list(networks: str):
if len(networks.strip()) == 0:
return []
return [ipaddress.ip_network(network.strip()) for network in networks.split(",")]
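# Illustrative behaviour of the helper above, following standard `ipaddress`
# semantics (not part of the original module):
#   _parse_network_list("192.168.0.0/24, 10.0.0.0/8")
#       -> [IPv4Network('192.168.0.0/24'), IPv4Network('10.0.0.0/8')]
#   _parse_network_list("")  -> []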
class User(AbstractUser):
username = models.TextField(default=_default_uuid, unique=True)
email = models.EmailField(unique=True)
def __str__(self):
return self.email
class Service(models.Model):
ACTIVE = "AC"
ARCHIVED = "AR"
SERVICE_STATUSES = [(ACTIVE, "Active"), (ARCHIVED, "Archived")]
uuid = models.UUIDField(default=_default_uuid, primary_key=True)
name = models.TextField(max_length=64)
owner = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="owning_services"
)
collaborators = models.ManyToManyField(
User, related_name="collaborating_services", blank=True
)
created = models.DateTimeField(auto_now_add=True)
link = models.URLField(blank=True)
origins = models.TextField(default="*")
status = models.CharField(
max_length=2, choices=SERVICE_STATUSES, default=ACTIVE, db_index=True
)
respect_dnt = models.BooleanField(default=True)
ignore_robots = models.BooleanField(default=False)
collect_ips = models.BooleanField(default=True)
ignored_ips = models.TextField(
default="", blank=True, validators=[_validate_network_list]
)
hide_referrer_regex = models.TextField(
default="", blank=True, validators=[_validate_regex]
)
script_inject = models.TextField(default="", blank=True)
class Meta:
ordering = ["name", "uuid"]
def __str__(self):
return self.name
def get_ignored_networks(self):
return _parse_network_list(self.ignored_ips)
def get_ignored_referrer_regex(self):
if len(self.hide_referrer_regex.strip()) == 0:
return re.compile(r".^") # matches nothing
else:
try:
return re.compile(self.hide_referrer_regex)
except re.error:
# Regexes are validated in the form, but this is an important
# fallback to prevent form validation and malformed source
# data from causing all service pages to error
return re.compile(r".^")
def get_daily_stats(self):
return self.get_core_stats(
start_time=timezone.now() - timezone.timedelta(days=1)
)
def get_core_stats(self, start_time=None, end_time=None):
if start_time is None:
start_time = timezone.now() - timezone.timedelta(days=30)
if end_time is None:
end_time = timezone.now()
main_data = self.get_relative_stats(start_time, end_time)
comparison_data = self.get_relative_stats(
start_time - (end_time - start_time), start_time
)
main_data["compare"] = comparison_data
return main_data
def get_relative_stats(self, start_time, end_time):
Session = apps.get_model("analytics", "Session")
Hit = apps.get_model("analytics", "Hit")
tz_now = timezone.now()
currently_online = Session.objects.filter(
service=self, last_seen__gt=tz_now - ACTIVE_USER_TIMEDELTA
).count()
sessions = Session.objects.filter(
service=self, start_time__gt=start_time, start_time__lt=end_time
).order_by("-start_time")
session_count = sessions.count()
hits = Hit.objects.filter(
service=self, start_time__lt=end_time, start_time__gt=start_time
)
hit_count = hits.count()
has_hits = Hit.objects.filter(service=self).exists()
bounces = sessions.filter(is_bounce=True)
bounce_count = bounces.count()
locations = (
hits.values("location")
.annotate(count=models.Count("location"))
.order_by("-count")
)
referrer_ignore = self.get_ignored_referrer_regex()
referrers = [
referrer
for referrer in (
hits.filter(initial=True)
.values("referrer")
.annotate(count=models.Count("referrer"))
.order_by("-count")
)
if not referrer_ignore.match(referrer["referrer"])
]
countries = (
sessions.values("country")
.annotate(count=models.Count("country"))
.order_by("-count")
)
operating_systems = (
sessions.values("os").annotate(count=models.Count("os")).order_by("-count")
)
browsers = (
sessions.values("browser")
.annotate(count=models.Count("browser"))
.order_by("-count")
)
device_types = (
sessions.values("device_type")
.annotate(count=models.Count("device_type"))
.order_by("-count")
)
devices = (
sessions.values("device")
.annotate(count=models.Count("device"))
.order_by("-count")
)
avg_load_time = hits.aggregate(load_time__avg=models.Avg("load_time"))[
"load_time__avg"
]
avg_hits_per_session = hit_count / session_count if session_count > 0 else None
avg_session_duration = self._get_avg_session_duration(sessions, session_count)
chart_data, chart_tooltip_format, chart_granularity = self._get_chart_data(
sessions, hits, start_time, end_time, tz_now
)
return {
"currently_online": currently_online,
"session_count": session_count,
"hit_count": hit_count,
"has_hits": has_hits,
"bounce_rate_pct": bounce_count * 100 / session_count
if session_count > 0
else None,
"avg_session_duration": avg_session_duration,
"avg_load_time": avg_load_time,
"avg_hits_per_session": avg_hits_per_session,
"locations": locations,
"referrers": referrers,
"countries": countries,
"operating_systems": operating_systems,
"browsers": browsers,
"devices": devices,
"device_types": device_types,
"chart_data": chart_data,
"chart_tooltip_format": chart_tooltip_format,
"chart_granularity": chart_granularity,
"online": True,
}
def _get_avg_session_duration(self, sessions, session_count):
try:
avg_session_duration = sessions.annotate(
duration=models.F("last_seen") - models.F("start_time")
).aggregate(time_delta=models.Avg("duration"))["time_delta"]
except NotSupportedError:
avg_session_duration = sum(
[
(session.last_seen - session.start_time).total_seconds()
for session in sessions
]
) / max(session_count, 1)
if session_count == 0:
avg_session_duration = None
return avg_session_duration
def _get_chart_data(self, sessions, hits, start_time, end_time, tz_now):
# Show hourly chart for date ranges of 3 days or less, otherwise daily chart
if (end_time - start_time).days < 3:
chart_tooltip_format = "MM/dd HH:mm"
chart_granularity = "hourly"
sessions_per_hour = (
sessions.annotate(hour=TruncHour("start_time"))
.values("hour")
.annotate(count=models.Count("uuid"))
.order_by("hour")
)
chart_data = {
k["hour"]: {"sessions": k["count"], "hits": 0}
for k in sessions_per_hour
}
hits_per_hour = (
hits.annotate(hour=TruncHour("start_time"))
.values("hour")
.annotate(count=models.Count("id"))
.order_by("hour")
)
for k in hits_per_hour:
if k["hour"] not in chart_data:
chart_data[k["hour"]] = {"hits": k["count"], "sessions": 0}
else:
chart_data[k["hour"]]["hits"] = k["count"]
hours_range = range(int((end_time - start_time).total_seconds() / 3600) + 1)
for hour_offset in hours_range:
hour = start_time + timezone.timedelta(hours=hour_offset)
if hour not in chart_data and hour <= tz_now:
chart_data[hour] = {"sessions": 0, "hits": 0}
else:
chart_tooltip_format = "MMM d"
chart_granularity = "daily"
sessions_per_day = (
sessions.annotate(date=TruncDate("start_time"))
.values("date")
.annotate(count=models.Count("uuid"))
.order_by("date")
)
chart_data = {
k["date"]: {"sessions": k["count"], "hits": 0} for k in sessions_per_day
}
hits_per_day = (
hits.annotate(date=TruncDate("start_time"))
.values("date")
.annotate(count=models.Count("id"))
.order_by("date")
)
for k in hits_per_day:
if k["date"] not in chart_data:
chart_data[k["date"]] = {"hits": k["count"], "sessions": 0}
else:
chart_data[k["date"]]["hits"] = k["count"]
for day_offset in range((end_time - start_time).days + 1):
day = (start_time + timezone.timedelta(days=day_offset)).date()
if day not in chart_data and day <= tz_now.date():
chart_data[day] = {"sessions": 0, "hits": 0}
chart_data = sorted(chart_data.items(), key=lambda k: k[0])
chart_data = {
"sessions": [v["sessions"] for k, v in chart_data],
"hits": [v["hits"] for k, v in chart_data],
"labels": [str(k) for k, v in chart_data],
}
return chart_data, chart_tooltip_format, chart_granularity
def get_absolute_url(self):
return reverse(
"dashboard:service",
kwargs={"pk": self.pk},
)
| 34.940994 | 113 | 0.588748 |
aced66acb4c8aaa4a16390f6282d18a16efcd43a | 4,631 | py | Python | LeetCode-All-Solution/Python3/LC-0055-Jump-Game.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-0055-Jump-Game.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-0055-Jump-Game.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0055-Jump-Game.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-25
=================================================================="""
import sys
import time
from typing import List
import collections
"""
LeetCode - 0055 - (Medium) - Jump Game
https://leetcode.com/problems/jump-game/
Description & Requirement:
You are given an integer array nums. You are initially positioned at the array's first index,
and each element in the array represents your maximum jump length at that position.
Return true if you can reach the last index, or false otherwise.
Example 1:
Input: nums = [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: nums = [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum jump length is 0,
which makes it impossible to reach the last index.
Constraints:
1 <= nums.length <= 10^4
0 <= nums[i] <= 10^5
"""
class Solution:
def canJump(self, nums: List[int]) -> bool:
# exception case
if not isinstance(nums, list) or len(nums) <= 0:
return False # Error input type
if len(nums) == 1:
return True # don't need any jump
if len(nums) == 2:
return True if nums[0] > 0 else False # only need one jump
# main method: (1. Greedy Search & Prefix Sum; 2. BFS.)
return self._canJumpGreedy(nums)
# return self._canJumpBfs(nums)
def _canJumpGreedy(self, nums: List[int]) -> bool:
len_nums = len(nums)
assert len_nums > 2
if 0 not in nums: # no 0, must be able to jump to the end
return True
cur_index = 0 # start position
max_reach_index = nums[cur_index] # init max reachable position
while cur_index <= max_reach_index: # consider each reachable position
if max_reach_index >= len_nums - 1: # able to reach the end
return True
max_reach_index = max(max_reach_index, cur_index + nums[cur_index]) # may expand the while loop boundary
cur_index += 1
return False
def _canJumpBfs(self, nums: List[int]) -> bool:
"""
        TODO: this BFS gets TLE on large inputs; the search tree needs pruning.
"""
len_nums = len(nums)
assert len_nums > 2
if 0 not in nums: # no 0, must be able to jump to the end
return True
# if guaranteed not jump to 0, then must be able to reach the end
# regard the list as a graph, if nums[i] == 2, it means there are edges: nums[i]->nums[i+1] & nums[i]->nums[i+2]
# preprocess: construct graph
edge_dict = dict({})
for idx, num in enumerate(nums):
edge_dict[idx] = []
for _n in reversed(range(1, num + 1)): # from 1, don't link itself; jump to bigger index first, so reverse.
if 0 <= idx + _n < len_nums: # avoid out of index
edge_dict[idx].append(idx + _n) # link idx -> idx + _n
# perform BFS on this graph, see if the start_pos 0 can reach to the end_pos n-1
bfs_queue = collections.deque()
bfs_queue.append(0) # start from 0
done_bfs_set = set() # to avoid repeat search
done_bfs_set.add(0)
while len(bfs_queue) > 0:
cur_index = bfs_queue.popleft()
if cur_index == len_nums - 1: # reach to the end_pos n-1
return True
assert cur_index in edge_dict
for next_index in edge_dict[cur_index]: # consider all neighbors
if next_index not in done_bfs_set: # avoid repeat search
done_bfs_set.add(next_index)
bfs_queue.append(next_index)
return False
def main():
# Example 1: Output: true
# nums = [2, 3, 1, 1, 4]
# Example 2: Output: false
# nums = [3, 2, 1, 0, 4]
# Example 3: Output: true
# nums = [2, 0, 0]
# Example 4: Output: true
nums = [5, 9, 3, 2, 1, 0, 2, 3, 3, 1, 0, 0]
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.canJump(nums)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 32.612676 | 120 | 0.578709 |
aced6764a03a8a280e781dd19788609421911e0d | 2,852 | py | Python | 58837175-template-matching/template_matching.py | nathancy/stackoverflow | e9e2e2b8fba61e41526638a13ac7ada6de2d7560 | [
"MIT"
] | 3 | 2019-09-18T10:45:20.000Z | 2021-09-18T08:36:49.000Z | 58837175-template-matching/template_matching.py | nathancy/stackoverflow | e9e2e2b8fba61e41526638a13ac7ada6de2d7560 | [
"MIT"
] | 1 | 2020-03-19T15:49:31.000Z | 2020-03-30T14:54:03.000Z | 58837175-template-matching/template_matching.py | nathancy/stackoverflow | e9e2e2b8fba61e41526638a13ac7ada6de2d7560 | [
"MIT"
] | 1 | 2021-04-08T19:30:42.000Z | 2021-04-08T19:30:42.000Z | import cv2
import numpy as np
# Resizes an image while maintaining aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# Grab the image size and initialize dimensions
dim = None
(h, w) = image.shape[:2]
# Return original image if no need to resize
if width is None and height is None:
return image
# We are resizing height if width is none
if width is None:
# Calculate the ratio of the height and construct the dimensions
r = height / float(h)
dim = (int(w * r), height)
# We are resizing width if height is none
else:
        # Calculate the ratio of the width and construct the dimensions
r = width / float(w)
dim = (width, int(h * r))
# Return the resized image
return cv2.resize(image, dim, interpolation=inter)
# Load template, convert to grayscale, perform canny edge detection
template = cv2.imread('template.png')
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("template", template)
# Load original image, convert to grayscale
original_image = cv2.imread('1.png')
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None
# Dynamically rescale image for better template matching
for scale in np.linspace(0.1, 3.0, 20)[::-1]:
# Resize image to scale and keep track of ratio
resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
r = gray.shape[1] / float(resized.shape[1])
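    # Illustrative note: 'r' maps coordinates found in the resized image back
    # to the original image's pixel space -- a match at x in the resized image
    # corresponds to x * r in the original (used for the bounding box below).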
# Stop if template image size is larger than resized image
if resized.shape[0] < tH or resized.shape[1] < tW:
break
# Detect edges in resized image and apply template matching
canny = cv2.Canny(resized, 50, 200)
detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
(_, max_val, _, max_loc) = cv2.minMaxLoc(detected)
# Uncomment this section for visualization
'''
clone = np.dstack([canny, canny, canny])
cv2.rectangle(clone, (max_loc[0], max_loc[1]), (max_loc[0] + tW, max_loc[1] + tH), (0,255,0), 2)
cv2.imshow('visualize', clone)
cv2.waitKey(0)
'''
# Keep track of correlation value
# Higher correlation means better match
if found is None or max_val > found[0]:
found = (max_val, max_loc, r)
# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))
# Draw bounding box on ROI
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
cv2.imshow('detected', original_image)
cv2.imwrite('detected.png', original_image)
cv2.waitKey(0)
| 36.101266 | 101 | 0.65533 |
aced678e87940e191d3179afa615150ddfda1edd | 1,263 | py | Python | 60-69/65_Valid Number.py | yanchdh/LeetCode | ec60364082ad246390cf3292090d23f1c7dd08b4 | [
"BSD-2-Clause"
] | 2 | 2018-01-12T08:45:08.000Z | 2018-01-15T13:29:56.000Z | 60-69/65_Valid Number.py | yanchdh/LeetCode | ec60364082ad246390cf3292090d23f1c7dd08b4 | [
"BSD-2-Clause"
] | null | null | null | 60-69/65_Valid Number.py | yanchdh/LeetCode | ec60364082ad246390cf3292090d23f1c7dd08b4 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
# https://leetcode.com/problems/valid-number/description/
class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
SIGN = '+-'
first, last = 0, len(s) - 1
while first <= last and s[first] == ' ':
first += 1
while last >= first and s[last] == ' ':
last -= 1
if first > last:
return False
        if s[first] in SIGN:
            first += 1
            if first > last:  # the string was only a sign, e.g. "+"
                return False
if s[first] == 'e' or s[last] == 'e' or s[last] in SIGN:
return False
point, e = -1, -1
i = first
while i <= last:
if s[i] == 'e':
if e != -1:
return False
e = i
if i + 1 <= last and s[i + 1] in SIGN:
i += 1
elif s[i] == '.':
if point != -1 or e != -1:
return False
point = i
elif s[i] < '0' or s[i] > '9':
return False
i += 1
if point == first and (point + 1 == e or point == last):
return False
return True | 26.3125 | 64 | 0.354711 |
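# Minimal smoke test for the Valid Number solution above (illustrative; not
# part of the original file).
if __name__ == '__main__':
    s = Solution()
    assert s.isNumber('0') is True
    assert s.isNumber(' 0.1 ') is True
    assert s.isNumber('2e10') is True
    assert s.isNumber('4e+5') is True
    assert s.isNumber('abc') is False
    assert s.isNumber('1e') is False
    assert s.isNumber('+') is False
    print('all checks passed')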
aced679df9b0c81e82c67f3b5056a40f4bc34322 | 16,896 | py | Python | cms/tests/menu_page_viewperm_staff.py | HiddenClever/django-cms | 8176bb37fd69f865c5b89fda931021a5dbf31549 | [
"BSD-3-Clause"
] | null | null | null | cms/tests/menu_page_viewperm_staff.py | HiddenClever/django-cms | 8176bb37fd69f865c5b89fda931021a5dbf31549 | [
"BSD-3-Clause"
] | null | null | null | cms/tests/menu_page_viewperm_staff.py | HiddenClever/django-cms | 8176bb37fd69f865c5b89fda931021a5dbf31549 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.tests.menu_page_viewperm import ViewPermissionTests
from cms.compat import get_user_model
class ViewPermissionComplexMenuStaffNodeTests(ViewPermissionTests):
"""
Test CMS_PUBLIC_FOR=staff group access and menu nodes rendering
"""
settings_overrides = {
'CMS_PERMISSION': True,
'CMS_PUBLIC_FOR': 'staff',
}
def test_public_pages_anonymous_norestrictions(self):
"""
All pages are INVISIBLE to an anonymous user
"""
all_pages = self._setup_tree_pages()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_public_menu_anonymous_user(self):
"""
        Anonymous users see nothing, as they are not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_node_staff_access_page_and_children_group_1(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1')
# user 1 is member of group_b_access_page_and_children
user = get_user_model().objects.get(username='user_1')
urls = self.get_url_dict(all_pages)
# call /
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_children_group_1_no_staff(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1_nostaff')
user = get_user_model().objects.get(username='user_1_nostaff')
urls = self.get_url_dict(all_pages)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_children_group_2(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
to page_b_b and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2')
user = get_user_model().objects.get(username='user_2')
urls = self.get_url_dict(all_pages)
self.assertViewNotAllowed(urls['/en/page_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_a/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/'], user)
self.assertViewNotAllowed(urls['/en/page_d/'], user)
self.assertViewAllowed(urls['/en/page_d/page_d_a/'], user)
def test_node_staff_access_children_group_2_nostaff(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and the user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b_b_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2_nostaff')
user = get_user_model().objects.get(username='user_2_nostaff')
urls = self.get_url_dict(all_pages)
# member of group that has access to this page
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_descendants_group_3(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3')
user = get_user_model().objects.get(username='user_3')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_page_and_descendants_group_3_nostaff(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3_nostaff')
user = get_user_model().objects.get(username='user_3_nostaff')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_descendants_group_4(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4')
user = get_user_model().objects.get(username='user_4')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
# not a direct child
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_descendants_group_4_nostaff(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4_nostaff')
user = get_user_model().objects.get(username='user_4_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_page_group_5(self):
"""
simulate behaviour of group b member
group_d_ACCESS_PAGE to page_d
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_c',
'page_c_a',
'page_c_b',
'page_d',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_5')
user = get_user_model().objects.get(username='user_5')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_group_5_nostaff(self):
"""
simulate behaviour of group b member
group_d_ACCESS_PAGE to page_d
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_d',]
self.assertGrantedVisibility(all_pages, granted, username='user_5_nostaff')
user = get_user_model().objects.get(username='user_5_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
| 43.658915 | 93 | 0.604107 |
aced67fc8388ea0bb2b8b1640d77b2c7bf6f246c | 4,450 | py | Python | OttBands1min/strategies/OttBands1min/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 38 | 2021-09-18T15:33:28.000Z | 2022-02-21T17:29:08.000Z | OttBands1min/strategies/OttBands1min/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 4 | 2022-01-02T14:46:12.000Z | 2022-02-16T18:39:41.000Z | OttBands1min/strategies/OttBands1min/__init__.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 11 | 2021-10-19T06:21:43.000Z | 2022-02-21T17:29:10.000Z | import custom_indicators as cta
import numpy as np
from jesse import utils
from jesse.services.selectors import get_all_trading_routes
from jesse.strategies import Strategy, cached
from vars import tp_qtys
class OttBands1min(Strategy):
def __init__(self):
super().__init__()
# self.profit_levels = (0.01, 0.02, 0.03, 0.05, 0.08)
# self.profit_levels = (0.01, 0.02, 0.04, 0.08, 0.16)
self.profit_levels = (0.005, 0.01, 0.02, 0.04, 0.08)
self.ott_ma_type = 'kama'
def hyperparameters(self):
return [
{'name': 'ott_len', 'type': int, 'min': 2, 'max': 52, 'default': 18},
{'name': 'ott_percent', 'type': int, 'min': 10, 'max': 310, 'default': 200},
{'name': 'ott_bw', 'type': int, 'min': 10, 'max': 400, 'default': 80},
{'name': 'tps_qty_index', 'type': int, 'min': 0, 'max': 1000, 'default': 241},
]
@property
@cached
def ott_len(self):
return self.hp['ott_len']
@property
@cached
def ott_percent(self):
return self.hp['ott_percent'] / 100
@property
@cached
def ott(self):
return cta.ott(self.candles[-240:, 2], self.ott_len, self.ott_percent, ma_type=self.ott_ma_type, sequential=True)
@property
@cached
def ott_upper_band(self):
multiplier = 1 + (self.hp['ott_bw'] / 10000)
return np.multiply(self.ott.ott, multiplier)
@property
@cached
def ott_lower_band(self):
multiplier = 1 - (self.hp['ott_bw'] / 10000)
return np.multiply(self.ott.ott, multiplier)
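    # Band arithmetic (illustrative): with the default ott_bw of 80 the
    # multipliers above are 1 +/- 80 / 10000, i.e. bands 0.8% above and
    # below the OTT line.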
@property
@cached
def cross_up_upper_band(self):
return utils.crossed(self.ott.mavg, self.ott_upper_band, direction='above', sequential=False)
@property
@cached
def cross_down_upper_band(self):
return utils.crossed(self.ott.mavg, self.ott_upper_band, direction='below', sequential=False)
@property
@cached
def cross_down_lower_band(self):
return utils.crossed(self.ott.mavg, self.ott_lower_band, direction='below', sequential=False)
@property
@cached
def cross_up_lower_band(self):
return utils.crossed(self.ott.mavg, self.ott_lower_band, direction='above', sequential=False)
@property
@cached
def cross_up(self):
return utils.crossed(self.ott.mavg, self.ott.ott, direction='above', sequential=False)
@property
@cached
def cross_down(self):
return utils.crossed(self.ott.mavg, self.ott.ott, direction='below', sequential=False)
@property
@cached
def pos_size_in_usd(self):
return self.capital / 10 # (len(get_all_trading_routes()) * 2)
@property
@cached
def calc_long_stop(self):
return self.ott.ott[-1]
@property
@cached
def calc_short_stop(self):
return self.ott.ott[-1]
def should_long(self) -> bool:
return self.cross_up_upper_band
def should_short(self) -> bool:
return self.cross_down_lower_band
@property
@cached
def pos_size(self):
return utils.size_to_qty(self.pos_size_in_usd, self.price, fee_rate=self.fee_rate) * self.leverage
def go_long(self):
self.buy = self.pos_size, self.price
def go_short(self):
self.sell = self.pos_size, self.price
def on_open_position(self, order):
qty = self.position.qty
tps = []
qty_curve = tp_qtys[self.hp['tps_qty_index']]
if self.is_long:
# sl = self.calc_long_stop
# self.stop_loss = self.position.qty, sl
for index, _qty in enumerate(qty_curve):
if _qty > 0:
tps.append(
((qty * _qty), self.position.entry_price * (1 + (self.profit_levels[index]))))
if self.is_short:
# sl = self.calc_short_stop
# self.stop_loss = self.position.qty, sl
for index, _qty in enumerate(qty_curve):
if _qty > 0:
tps.append(
((qty * _qty), self.position.entry_price * (1 - (self.profit_levels[index]))))
self.take_profit = tps
@cached
def update_position(self):
if self.is_long and self.cross_down: # _upper_band:
self.liquidate()
if self.is_short and self.cross_up: # self.cross_up_lower_band:
self.liquidate()
def should_cancel(self) -> bool:
return True
| 29.666667 | 121 | 0.610337 |
aced68dd5f79b0f65a3685bae3de4c1bd19d1614 | 490 | py | Python | wsgi.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | [
"BSD-3-Clause"
] | 2 | 2015-12-02T17:26:12.000Z | 2015-12-03T00:43:14.000Z | wsgi.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | [
"BSD-3-Clause"
] | 1 | 2015-12-02T17:26:43.000Z | 2016-03-15T00:01:20.000Z | wsgi.py | roberzguerra/rover | 14b6a7a47e75d6b6f8ca44fc0eb1cca500e0eecb | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for the rover project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % real_project_name("rover"))
application = get_wsgi_application()
| 25.789474 | 78 | 0.755102 |
aced6915eecd29435b8804516036fb59a31a6eee | 1,197 | py | Python | ball_possession/graphics/plot_region.py | arthurdemarchi/FuzzyBallPossession | f7bf35ae8266f5ad5b312312a0f2702f55131678 | [
"MIT"
] | null | null | null | ball_possession/graphics/plot_region.py | arthurdemarchi/FuzzyBallPossession | f7bf35ae8266f5ad5b312312a0f2702f55131678 | [
"MIT"
] | null | null | null | ball_possession/graphics/plot_region.py | arthurdemarchi/FuzzyBallPossession | f7bf35ae8266f5ad5b312312a0f2702f55131678 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from ball_possession.fuzzy.region import Region
def plot_region(region,
title,
x_label='x',
y_label='u(x)',
write=False,
file='figura.png'):
    # scatter plot when a single Region is passed
if type(region) == Region:
x = []
y = []
delta_x = region.init
for item in region.fuzzy:
x.append(item['x'])
y.append(item['u'])
plt.scatter(x, y, s=0.1, alpha=1)
    # scatter plot when a list of Regions is passed
elif type(region) == list:
delta_x = region[0].init
for each_region in region:
x = []
y = []
for item in each_region.fuzzy:
x.append(item['x'])
y.append(item['u'])
plt.scatter(x, y, s=0.1, alpha=1)
    # prepare plot title and labels
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xlim(left=delta_x)
plt.ylim(bottom=0)
#write to disk
if write:
plt.savefig(file)
plt.close()
#show in GUI
else:
plt.show(block=False)
plt.pause(3)
plt.close()
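# Illustrative usage sketch (assumes a Region exposing `init` and an iterable
# `fuzzy` of {'x': ..., 'u': ...} points, exactly as consumed above):
#   plot_region(my_region, title='Membership function',
#               write=True, file='region.png')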
| 23.94 | 47 | 0.509607 |
aced6a534f99e25f26a726135441cf79eca58db2 | 8,543 | py | Python | lib/_panel.py | nepia11/Blender-AnimeHairSupporter | 017b2a6fceaf9934981ed2faaf8161d67a062a54 | [
"Apache-2.0"
] | 31 | 2019-02-22T13:38:38.000Z | 2021-12-17T02:53:09.000Z | lib/_panel.py | nepia11/Blender-AnimeHairSupporter | 017b2a6fceaf9934981ed2faaf8161d67a062a54 | [
"Apache-2.0"
] | 3 | 2021-01-25T08:37:49.000Z | 2021-01-27T08:42:59.000Z | lib/_panel.py | nepia11/Blender-AnimeHairSupporter | 017b2a6fceaf9934981ed2faaf8161d67a062a54 | [
"Apache-2.0"
] | 3 | 2019-06-30T15:50:18.000Z | 2021-01-25T08:35:17.000Z | import bpy
from . import _common
class VIEW3D_PT_tools_anime_hair_supporter(bpy.types.Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = _common.region()
bl_category = 'Tools'
bl_context = 'objectmode'
bl_label = "アニメ髪支援"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
props = context.scene.ahs_props
        # Converters
column = self.layout.column(align=True)
row = column.row(align=True)
row.operator('object.ahs_convert_edgemesh_to_curve', icon='IPO_EASE_IN_OUT')
row.enabled = bool(len([o for o in context.selected_objects if o.type == 'MESH']))
row = column.row(align=True)
row.operator('object.ahs_convert_curve_to_edgemesh', icon='IPO_CONSTANT')
row.enabled = bool(len([o for o in context.selected_objects if o.type == 'CURVE']))
        # Main curve
box = self.layout.box()
row = box.row(align=True)
row.prop(props, 'maincurve_expand', text="メインカーブ", icon='MOD_CURVE', emboss=False)
row.operator('object.ahs_maincurve_activate', text="", icon='ZOOM_SELECTED')
row.label(text="", icon='BLANK1')
if props.maincurve_expand:
            # Volume (flesh-out) controls
row = box.row(align=True)
row.operator('object.ahs_maincurve_volume_up', icon='MESH_CAPSULE')
row.operator('object.ahs_maincurve_volume_down', text="", icon='X')
column = box.column(align=True)
            # Extra deform
column.operator('object.ahs_maincurve_extra_deform', icon='PARTICLE_PATH')
            # Gradation tilt
column.operator('object.ahs_maincurve_gradation_tilt', icon='FORCE_MAGNETIC')
            # Sub tools
column = box.column(align=True)
row = column.row(align=True)
row.operator('object.ahs_maincurve_select', icon='RESTRICT_SELECT_OFF')
row.operator('object.ahs_maincurve_hide', text="表示", icon='HIDE_OFF').is_hide = False
row.operator('object.ahs_maincurve_hide', text="隠す", icon='HIDE_ON').is_hide = True
            # Resolution
row = column.row(align=True)
try:
is_successed = context.active_object.data.taper_object and context.active_object.data.bevel_object and context.active_object.data.splines.active
except:
is_successed = False
if is_successed:
row.prop(context.active_object.data.splines.active, 'resolution_u', text="解像度")
else:
row.label(text="解像度:")
row.operator('object.ahs_maincurve_set_resolution', text="", icon='PREFERENCES')
            # Order (degree)
row = column.row(align=True)
try:
is_successed = context.active_object.data.taper_object and context.active_object.data.bevel_object and context.active_object.data.splines.active
except:
is_successed = False
if is_successed:
row.prop(context.active_object.data.splines.active, 'order_u', text="次数")
else:
row.label(text="次数:")
row.operator('object.ahs_maincurve_set_order', text="", icon='PREFERENCES')
        # Taper curve
box = self.layout.box()
row = box.row(align=True)
row.prop(props, 'tapercurve_expand', text="テーパーカーブ", icon='CURVE_NCURVE', emboss=False)
row.operator('object.ahs_tapercurve_activate', text="", icon='ZOOM_SELECTED').mode = 'TAPER'
row.operator('object.ahs_tapercurve_id_singlize', text="", icon='COPY_ID')
if props.tapercurve_expand:
            # Change type, etc.
row = _common.box_split(box, 0.6, False)
op = row.operator('object.ahs_tapercurve_change_type', icon='HAND')
op.is_taper, op.is_bevel = True, False
op = row.operator('object.ahs_tapercurve_mirror', icon='MOD_MIRROR')
op.mode, op.is_mirror_x, op.is_mirror_y = 'TAPER', False, True
            # Reset positions, etc.
row = box.row(align=False)
row.operator('object.ahs_tapercurve_relocation', icon='PARTICLE_TIP').mode = 'BOTH'
row.operator('object.ahs_tapercurve_remove_alones', icon='X').mode = 'BOTH'
            # Sub tools
column = box.column(align=True)
row = column.row(align=True)
row.operator('object.ahs_tapercurve_select', icon='RESTRICT_SELECT_OFF').mode = 'TAPER'
op = row.operator('object.ahs_tapercurve_hide', text="表示", icon='HIDE_OFF')
op.mode, op.is_hide = 'TAPER', False
op = row.operator('object.ahs_tapercurve_hide', text="隠す", icon='HIDE_ON')
op.mode, op.is_hide = 'TAPER', True
            # Resolution
row = column.row(align=True)
try:
row.prop(context.active_object.data.taper_object.data.splines.active, 'resolution_u', text="解像度")
is_successed = True
except:
is_successed = False
if not is_successed:
taper_objects = [c.taper_object for c in context.blend_data.curves if c.taper_object]
try:
if context.active_object in taper_objects:
row.prop(context.active_object.data.splines.active, 'resolution_u', text="解像度")
is_successed = True
except:
is_successed = False
if not is_successed:
row.label(text="解像度:")
row.operator('object.ahs_maincurve_set_resolution', text="", icon='PREFERENCES')
        # Bevel curve
box = self.layout.box()
row = box.row(align=True)
row.prop(props, 'bevelcurve_expand', text="ベベルカーブ", icon='SURFACE_NCIRCLE', emboss=False)
row.operator('object.ahs_tapercurve_activate', text="", icon='ZOOM_SELECTED').mode = 'BEVEL'
row.operator('object.ahs_tapercurve_id_singlize', text="", icon='COPY_ID')
if props.bevelcurve_expand:
            # Change type, etc.
row = _common.box_split(box, 0.6, False)
op = row.operator('object.ahs_tapercurve_change_type', icon='HAND')
op.is_taper, op.is_bevel = False, True
op = row.operator('object.ahs_tapercurve_mirror', icon='MOD_MIRROR')
op.mode, op.is_mirror_x, op.is_mirror_y = 'BEVEL', True, False
            # Reset positions, etc.
row = box.row(align=False)
row.operator('object.ahs_tapercurve_relocation', icon='PARTICLE_TIP').mode = 'BOTH'
row.operator('object.ahs_tapercurve_remove_alones', icon='X').mode = 'BOTH'
            # Sub tools
column = box.column(align=True)
row = column.row(align=True)
row.operator('object.ahs_tapercurve_select', icon='RESTRICT_SELECT_OFF').mode = 'BEVEL'
op = row.operator('object.ahs_tapercurve_hide', text="表示", icon='HIDE_OFF')
op.mode, op.is_hide = 'BEVEL', False
op = row.operator('object.ahs_tapercurve_hide', text="隠す", icon='HIDE_ON')
op.mode, op.is_hide = 'BEVEL', True
            # Resolution
row = column.row(align=True)
try:
row.prop(context.active_object.data.bevel_object.data.splines.active, 'resolution_u', text="解像度")
is_successed = True
except:
is_successed = False
if not is_successed:
bevel_objects = [c.bevel_object for c in context.blend_data.curves if c.bevel_object]
try:
if context.active_object in bevel_objects:
row.prop(context.active_object.data.splines.active, 'resolution_u', text="解像度")
is_successed = True
except:
is_successed = False
if not is_successed:
row.label(text="解像度:")
row.operator('object.ahs_maincurve_set_resolution', text="", icon='PREFERENCES')
        # Converters
row = self.layout.row(align=True)
row.operator('object.ahs_convert_curve_to_armature', icon='ARMATURE_DATA')
row.enabled = bool(len([o for o in context.selected_objects if o.type == 'CURVE']))
row = self.layout.row(align=True)
row.operator('object.ahs_convert_curve_to_mesh', icon='MESH_UVSPHERE')
for ob in context.selected_objects:
if ob.type != 'CURVE':
continue
if ob.data.taper_object and ob.data.bevel_object:
row.enabled = True
break
else:
row.enabled = False
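# Minimal registration sketch (illustrative; the real add-on is expected to
# register its classes elsewhere in the package):
#   def register():
#       bpy.utils.register_class(VIEW3D_PT_tools_anime_hair_supporter)
#   def unregister():
#       bpy.utils.unregister_class(VIEW3D_PT_tools_anime_hair_supporter)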
| 45.684492 | 160 | 0.596863 |
aced6b9081c62848251507be711503b77515874e | 2,558 | py | Python | blog/views.py | clovisouza/tastebud | 1f1ee9522f0f6ffee35db147b47948ae7ee6269e | [
"BSD-3-Clause"
] | null | null | null | blog/views.py | clovisouza/tastebud | 1f1ee9522f0f6ffee35db147b47948ae7ee6269e | [
"BSD-3-Clause"
] | null | null | null | blog/views.py | clovisouza/tastebud | 1f1ee9522f0f6ffee35db147b47948ae7ee6269e | [
"BSD-3-Clause"
] | null | null | null | from django.http import HttpResponse
from django.template import Context, loader
# need to import the settings from the module...this sucks. It means that
# the top module needs to be in the import path. This is confusing.
from django.conf import settings
blog = __import__("%s.blog.models" % settings.MODULE_NAME,globals(),locals(),['*'])
# blog models become blog.BlogEntry etc etc
from datetime import datetime as d
def standard_context():
""" news up and returns a Context """
c = Context({
'site_name' :settings.SITE_NAME,
'feed_url' :settings.FEED_URL,
'title' :settings.SITE_NAME
})
return c
def front_page(request):
"""The index of the site. If there's a static page with the slug of main, display that, otherwise
show the blog_latest view."""
try:
main_page = blog.Page.objects.get(show_on_main_page=True)
return page(request, main_page.slug, main_page)
except:
return blog_latest(request)
def page(request,slug,page=None):
"""Display a page by slug, also has the option of passing in a page already pulled from the database."""
c = standard_context()
t = loader.get_template("page.html")
if page:
c['title'] = page.title
c['page'] = page
return HttpResponse(t.render(c))
else:
c['page'] = blog.Page.objects.all().filter(slug=slug)[0]
c['title'] = c['page'].title
return HttpResponse(t.render(c))
def blog_latest(request):
c = standard_context()
t = loader.get_template("blog.html")
c['entries'] = blog.BlogEntry.objects.all().filter(date_added__lte=d.now()).order_by('-date_added')[0:9]
return HttpResponse(t.render(c))
def blog_archive(request):
c = standard_context()
t = loader.get_template("blog_archive.html")
c['entries'] = blog.BlogEntry.objects.all().filter(date_added__lte=d.now()).order_by('-date_added')
return HttpResponse(t.render(c))
def blog_entry(request, slug):
c = standard_context()
t = loader.get_template("blog.html")
c['entries'] = blog.BlogEntry.objects.filter(slug=slug)
c['title'] = c['entries'][0].title
return HttpResponse(t.render(c))
def category_listing(request, slug):
c = standard_context()
t = loader.get_template("category.html")
c['category'] = blog.Category.objects.filter(slug=slug)[0]
c['entries'] = blog.BlogEntry.objects.filter(categories__slug__exact=slug).order_by('date_added')
c['title'] = c['category'].name
return HttpResponse(t.render(c))
| 37.072464 | 108 | 0.6724 |
aced6c05dee13bc1b68d1f8deb7368e97f3fe11f | 49,648 | py | Python | pywikibot/tools/__init__.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/tools/__init__.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | pywikibot/tools/__init__.py | valhallasw/pywikibot-core | 32a8c3c1298a5cb077381fe202daefde82c1c5d3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import print_function, unicode_literals
__version__ = '$Id: aced6c05dee13bc1b68d1f8deb7368e97f3fe11f $'
import bz2
import collections
import gzip
import inspect
import re
import subprocess
import sys
import threading
import time
import types
from distutils.version import Version
from warnings import warn
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if not PY2:
import queue as Queue
basestring = (str,)
unicode = str
else:
import Queue
def print_debug(msg, *args, **kwargs):
"""Simple debug routine."""
print(msg)
# This variable uses the builtin print function.
# pywikibot updates it to use logging in bot.init_handlers()
debug = print_debug
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
class NotImplementedClass(object):
"""No implementation is available."""
def __init__(self, *args, **kwargs):
"""Constructor."""
raise NotImplementedError(
'%s: %s' % (self.__class__.__name__, self.__doc__))
if PYTHON_VERSION < (2, 7):
try:
import future.backports.misc
except ImportError:
warn("""
pywikibot support of Python 2.6 relies on package future for many features.
Please upgrade to Python 2.7+ or Python 3.3+, or run:
"pip install future>=0.15.0"
""", RuntimeWarning)
try:
from ordereddict import OrderedDict
except ImportError:
class OrderedDict(NotImplementedClass):
"""OrderedDict not found."""
pass
try:
from counter import Counter
except ImportError:
class Counter(NotImplementedClass):
"""Counter not found."""
pass
else:
Counter = future.backports.misc.Counter
OrderedDict = future.backports.misc.OrderedDict
try:
count = future.backports.misc.count
except AttributeError:
warn('Please update the "future" package to at least version '
'0.15.0 to use its count.', RuntimeWarning, 2)
def count(start=0, step=1):
"""Backported C{count} to support keyword arguments and step."""
while True:
yield start
start += step
del future
else:
from collections import Counter # noqa ; unused
from collections import OrderedDict
from itertools import count # noqa ; unused
def empty_iterator():
# http://stackoverflow.com/a/13243870/473890
"""An iterator which does nothing."""
return
yield
class UnicodeMixin(object):
"""Mixin class to add __str__ method in Python 2 or 3."""
if not PY2:
def __str__(self):
"""Return the unicode representation as the str representation."""
return self.__unicode__()
else:
def __str__(self):
"""Return the str representation of the UTF-8 encoded Unicode."""
return self.__unicode__().encode('utf8')
# From http://python3porting.com/preparing.html
class ComparableMixin(object):
"""Mixin class to allow comparing to other objects which are comparable."""
    # Each method compares the reflected operator applied to ``other``
    # against ``self._cmpkey()``, so subclasses only need to define _cmpkey().
    def __lt__(self, other):
        """Compare if self is less than other."""
        return other > self._cmpkey()
    def __le__(self, other):
        """Compare if self is less than or equal to other."""
        return other >= self._cmpkey()
    def __eq__(self, other):
        """Compare if self is equal to other."""
        return other == self._cmpkey()
    def __ge__(self, other):
        """Compare if self is greater than or equal to other."""
        return other <= self._cmpkey()
    def __gt__(self, other):
        """Compare if self is greater than other."""
        return other < self._cmpkey()
    def __ne__(self, other):
        """Compare if self is not equal to other."""
        return other != self._cmpkey()
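# Example (illustrative sketch, not part of the original API): a subclass
# only needs to provide _cmpkey().
#
#     class Point(ComparableMixin):
#         def __init__(self, x):
#             self.x = x
#         def _cmpkey(self):
#             return self.x
#
#     Point(1) < Point(2)  # True, evaluated as 2 > 1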
class DotReadableDict(UnicodeMixin):
"""Parent class of Revision() and FileInfo().
Provide:
- __getitem__(), __unicode__() and __repr__().
"""
def __getitem__(self, key):
"""Give access to class values by key.
Revision class may also give access to its values by keys
e.g. revid parameter may be assigned by revision['revid']
as well as revision.revid. This makes formatting strings with
% operator easier.
"""
return getattr(self, key)
def __unicode__(self):
"""Return string representation."""
# TODO: This is more efficient if the PY2 test is done during
# class instantiation, and not inside the method.
if not PY2:
return repr(self.__dict__)
else:
_content = u', '.join(
u'{0}: {1}'.format(k, v) for k, v in self.__dict__.items())
return u'{{{0}}}'.format(_content)
def __repr__(self):
"""Return a more complete string representation."""
return repr(self.__dict__)
class FrozenDict(dict):
"""
Frozen dict, preventing write after initialisation.
Raises TypeError if write attempted.
"""
def __init__(self, data=None, error=None):
"""
Constructor.
@param data: mapping to freeze
@type data: mapping
@param error: error message
@type error: basestring
"""
if data:
args = [data]
else:
args = []
super(FrozenDict, self).__init__(*args)
self._error = error or 'FrozenDict: not writable'
def update(self, *args, **kwargs):
"""Prevent updates."""
raise TypeError(self._error)
__setitem__ = update
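# Example (illustrative sketch): reads behave like a normal dict, writes fail.
#
#     d = FrozenDict({'a': 1}, error='sealed')
#     d['a']      # 1
#     d['b'] = 2  # raises TypeError('sealed')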
def concat_options(message, line_length, options):
"""Concatenate options."""
indent = len(message) + 2
line_length -= indent
option_msg = u''
option_line = u''
for option in options:
if option_line:
option_line += ', '
# +1 for ','
if len(option_line) + len(option) + 1 > line_length:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line[:-1] # remove space
option_line = ''
option_line += option
if option_line:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line
return u'{0} ({1}):'.format(message, option_msg)
class LazyRegex(object):
"""
Regex object that obtains and compiles the regex on usage.
Instances behave like the object created using L{re.compile}.
"""
def __init__(self, pattern, flags=0):
"""
Constructor.
@param pattern: L{re} regex pattern
@type pattern: str or callable
@param flags: L{re.compile} flags
@type flags: int
"""
self.raw = pattern
self.flags = flags
super(LazyRegex, self).__init__()
@property
def raw(self):
"""Get raw property."""
if callable(self._raw):
self._raw = self._raw()
return self._raw
@raw.setter
def raw(self, value):
"""Set raw property."""
self._raw = value
self._compiled = None
@property
def flags(self):
"""Get flags property."""
return self._flags
@flags.setter
def flags(self, value):
"""Set flags property."""
self._flags = value
self._compiled = None
def __getattr__(self, attr):
"""Compile the regex and delegate all attribute to the regex."""
if self._raw:
if not self._compiled:
self._compiled = re.compile(self.raw, self.flags)
if hasattr(self._compiled, attr):
return getattr(self._compiled, attr)
raise AttributeError('%s: attr %s not recognised'
% (self.__class__.__name__, attr))
else:
raise AttributeError('%s.raw not set' % self.__class__.__name__)
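# Example (illustrative sketch): compilation happens on first attribute use.
#
#     digits = LazyRegex(lambda: r'\d+')
#     digits.match('42')  # triggers re.compile, then delegates to it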
class DeprecatedRegex(LazyRegex):
"""Regex object that issues a deprecation notice."""
def __init__(self, pattern, flags=0, name=None, instead=None):
"""
Constructor.
If name is None, the regex pattern will be used as part of
the deprecation warning.
@param name: name of the object that is deprecated
@type name: str or None
@param instead: if provided, will be used to specify the replacement
of the deprecated name
@type instead: str
"""
super(DeprecatedRegex, self).__init__(pattern, flags)
self._name = name or self.raw
self._instead = instead
def __getattr__(self, attr):
"""Issue deprecation warning."""
issue_deprecation_warning(
self._name, self._instead, 2)
return super(DeprecatedRegex, self).__getattr__(attr)
def first_lower(string):
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
def first_upper(string):
"""
Return a string with the first character capitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].upper() + string[1:]
def normalize_username(username):
"""Normalize the username."""
if not username:
return None
username = re.sub('[_ ]+', ' ', username).strip()
return first_upper(username)
class MediaWikiVersion(Version):
"""
Version object to allow comparing 'wmf' versions with normal ones.
The version mainly consist of digits separated by periods. After that is a
suffix which may only be 'wmf<number>', 'alpha', 'beta<number>' or
'-rc.<number>' (the - and . are optional). They are considered from old to
new in that order with a version number without suffix is considered the
newest. This secondary difference is stored in an internal _dev_version
attribute.
Two versions are equal if their normal version and dev version are equal. A
version is greater if the normal version or dev version is greater. For
example:
1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
    < 1.25-rc.1 < 1.25-rc.2 < 1.25
Any other suffixes are considered invalid.
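    For example (illustrative doctests added for clarity):
    >>> MediaWikiVersion('1.25wmf1') < MediaWikiVersion('1.25')
    True
    >>> MediaWikiVersion('1.24.1') < MediaWikiVersion('1.25alpha')
    True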
"""
MEDIAWIKI_VERSION = re.compile(r'^(\d+(?:\.\d+)+)(wmf(\d+)|alpha|beta(\d+)|-?rc\.?(\d+))?$')
def parse(self, vstring):
"""Parse version string."""
version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
if not version_match:
raise ValueError('Invalid version number "{0}"'.format(vstring))
components = [int(n) for n in version_match.group(1).split('.')]
# The _dev_version numbering scheme might change. E.g. if a stage
# between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
# are reassigned (beta=3, rc=4, stable=5).
if version_match.group(3): # wmf version
self._dev_version = (0, int(version_match.group(3)))
elif version_match.group(4):
self._dev_version = (2, int(version_match.group(4)))
elif version_match.group(5):
self._dev_version = (3, int(version_match.group(5)))
elif version_match.group(2) == 'alpha':
self._dev_version = (1, )
else:
self._dev_version = (4, )
self.suffix = version_match.group(2) or ''
self.version = tuple(components)
def __str__(self):
"""Return version number with optional suffix."""
return '.'.join(str(v) for v in self.version) + self.suffix
def _cmp(self, other):
if isinstance(other, basestring):
other = MediaWikiVersion(other)
if self.version > other.version:
return 1
if self.version < other.version:
return -1
if self._dev_version > other._dev_version:
return 1
if self._dev_version < other._dev_version:
return -1
return 0
if PY2:
__cmp__ = _cmp
class ThreadedGenerator(threading.Thread):
"""Look-ahead generator class.
Runs a generator in a separate thread and queues the results; can
be called like a regular generator.
Subclasses should override self.generator, I{not} self.run
Important: the generator thread will stop itself if the generator's
internal queue is exhausted; but, if the calling program does not use
all the generated values, it must call the generator's stop() method to
stop the background thread. Example usage:
>>> gen = ThreadedGenerator(target=range, args=(20,))
>>> try:
... data = list(gen)
... finally:
... gen.stop()
>>> data
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
def __init__(self, group=None, target=None, name="GeneratorThread",
args=(), kwargs=None, qsize=65536):
"""Constructor. Takes same keyword arguments as threading.Thread.
target must be a generator function (or other callable that returns
an iterable object).
@param qsize: The size of the lookahead queue. The larger the qsize,
the more values will be computed in advance of use (which can eat
up memory and processor time).
@type qsize: int
"""
if kwargs is None:
kwargs = {}
if target:
self.generator = target
if not hasattr(self, "generator"):
raise RuntimeError("No generator for ThreadedGenerator to run.")
self.args, self.kwargs = args, kwargs
threading.Thread.__init__(self, group=group, name=name)
self.queue = Queue.Queue(qsize)
self.finished = threading.Event()
def __iter__(self):
"""Iterate results from the queue."""
if not self.isAlive() and not self.finished.isSet():
self.start()
# if there is an item in the queue, yield it, otherwise wait
while not self.finished.isSet():
try:
yield self.queue.get(True, 0.25)
except Queue.Empty:
pass
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the background thread."""
self.finished.set()
def run(self):
"""Run the generator and store the results on the queue."""
iterable = any([hasattr(self.generator, key)
for key in ['__iter__', '__getitem__']])
if iterable and not self.args and not self.kwargs:
self.__gen = self.generator
else:
self.__gen = self.generator(*self.args, **self.kwargs)
for result in self.__gen:
while True:
if self.finished.isSet():
return
try:
self.queue.put_nowait(result)
except Queue.Full:
time.sleep(0.25)
continue
break
# wait for queue to be emptied, then kill the thread
while not self.finished.isSet() and not self.queue.empty():
time.sleep(0.25)
self.stop()
def itergroup(iterable, size):
"""Make an iterator that returns lists of (up to) size items from iterable.
Example:
>>> i = itergroup(range(25), 10)
>>> print(next(i))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(next(i))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> print(next(i))
[20, 21, 22, 23, 24]
>>> print(next(i))
Traceback (most recent call last):
...
StopIteration
"""
group = []
for item in iterable:
group.append(item)
if len(group) == size:
yield group
group = []
if group:
yield group
class ThreadList(list):
"""A simple threadpool class to limit the number of simultaneous threads.
Any threading.Thread object can be added to the pool using the append()
method. If the maximum number of simultaneous threads has not been reached,
the Thread object will be started immediately; if not, the append() call
will block until the thread is able to start.
>>> pool = ThreadList(limit=10)
>>> def work():
... time.sleep(1)
...
>>> for x in range(20):
... pool.append(threading.Thread(target=work))
...
"""
_logger = "threadlist"
def __init__(self, limit=128, *args):
"""Constructor."""
self.limit = limit
super(ThreadList, self).__init__(*args)
for item in self:
            if not isinstance(item, threading.Thread):
raise TypeError("Cannot add '%s' to ThreadList" % type(item))
def active_count(self):
"""Return the number of alive threads, and delete all non-alive ones."""
cnt = 0
for item in self[:]:
if item.isAlive():
cnt += 1
else:
self.remove(item)
return cnt
def append(self, thd):
"""Add a thread to the pool and start it."""
if not isinstance(thd, threading.Thread):
raise TypeError("Cannot append '%s' to ThreadList" % type(thd))
while self.active_count() >= self.limit:
time.sleep(2)
super(ThreadList, self).append(thd)
thd.start()
debug("thread %d ('%s') started" % (len(self), type(thd)),
self._logger)
def stop_all(self):
"""Stop all threads the pool."""
if self:
debug(u'EARLY QUIT: Threads: %d' % len(self), self._logger)
for thd in self:
thd.stop()
debug(u'EARLY QUIT: Queue size left in %s: %s'
% (thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist):
"""
Intersect generators listed in genlist.
Yield items only if they are yielded by all generators in genlist.
Threads (via ThreadedGenerator) are used in order to run generators
in parallel, so that items can be yielded before generators are
exhausted.
Threads are stopped when they are either exhausted or Ctrl-C is pressed.
Quitting before all generators are finished is attempted if
there is no more chance of finding an item in all queues.
@param genlist: list of page generators
@type genlist: list
"""
# If any generator is empty, no pages are going to be returned
for source in genlist:
if not source:
debug('At least one generator ({0!r}) is empty and execution was '
'skipped immediately.'.format(source), 'intersect')
return
# Item is cached to check that it is found n_gen
# times before being yielded.
cache = collections.defaultdict(set)
n_gen = len(genlist)
# Class to keep track of alive threads.
# Start new threads and remove completed threads.
thrlist = ThreadList()
for source in genlist:
threaded_gen = ThreadedGenerator(name=repr(source), target=source)
thrlist.append(threaded_gen)
while True:
# Get items from queues in a round-robin way.
for t in thrlist:
try:
# TODO: evaluate if True and timeout is necessary.
item = t.queue.get(True, 0.1)
# Cache entry is a set of thread.
# Duplicates from same thread are not counted twice.
cache[item].add(t)
if len(cache[item]) == n_gen:
yield item
# Remove item from cache.
# No chance of seeing it again (see later: early stop).
cache.pop(item)
active = thrlist.active_count()
max_cache = n_gen
if cache.values():
max_cache = max(len(v) for v in cache.values())
# No. of active threads is not enough to reach n_gen.
# We can quit even if some thread is still active.
# There could be an item in all generators which has not yet
# appeared from any generator. Only when we have lost one
# generator, then we can bail out early based on seen items.
if active < n_gen and n_gen - max_cache > active:
thrlist.stop_all()
return
except Queue.Empty:
pass
except KeyboardInterrupt:
thrlist.stop_all()
finally:
# All threads are done.
if thrlist.active_count() == 0:
return
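# Example (illustrative sketch): only items yielded by every generator are
# returned; ordering depends on thread timing, hence the sorted() call.
#
#     common = intersect_generators([iter([1, 2, 3]), iter([2, 3, 4])])
#     sorted(common)  # [2, 3]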
def filter_unique(iterable, container=None, key=None, add=None):
"""
Yield unique items from an iterable, omitting duplicates.
By default, to provide uniqueness, it puts the generated items into
the keys of a dict created as a local variable, each with a value of True.
It only yields items which are not already present in the local dict.
For large collections, this is not memory efficient, as a strong reference
to every item is kept in a local dict which can not be cleared.
    Also, the local dict can't be re-used when chaining unique operations on
multiple generators.
To avoid these issues, it is advisable for the caller to provide their own
container and set the key parameter to be the function L{hash}, or use a
L{weakref} as the key.
The container can be any object that supports __contains__.
If the container is a set or dict, the method add or __setitem__ will be
used automatically. Any other method may be provided explicitly using the
add parameter.
Note: This is not thread safe.
@param iterable: the source iterable
@type iterable: collections.Iterable
@param container: storage of seen items
@type container: type
@param key: function to convert the item to a key
@type key: callable
@param add: function to add an item to the container
@type add: callable
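    For example (illustrative doctest, using a plain set as container):
    >>> list(filter_unique([1, 2, 1, 3, 2], container=set()))
    [1, 2, 3]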
"""
if container is None:
container = {}
if not add:
if hasattr(container, 'add'):
def container_add(x):
container.add(key(x) if key else x)
add = container_add
else:
def container_setitem(x):
container.__setitem__(key(x) if key else x,
True)
add = container_setitem
for item in iterable:
if (key(item) if key else item) not in container:
add(item)
yield item
class CombinedError(KeyError, IndexError):
"""An error that gets caught by both KeyError and IndexError."""
class EmptyDefault(str, collections.Mapping):
"""
A default for a not existing siteinfo property.
It should be chosen if there is no better default known. It acts like an
    empty collection, so it can be iterated through safely if treated as a
    list, tuple, set or dictionary. It is also basically an empty string.
    Accessing a value via __getitem__ will result in a combined KeyError and
IndexError.
"""
def __init__(self):
"""Initialise the default as an empty string."""
str.__init__(self)
def _empty_iter(self):
"""An iterator which does nothing and drops the argument."""
return empty_iterator()
def __getitem__(self, key):
"""Raise always a L{CombinedError}."""
raise CombinedError(key)
iteritems = itervalues = iterkeys = __iter__ = _empty_iter
EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin(object):
"""
Return self when called.
When '_own_desc' is defined it'll also issue a deprecation warning using
issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
"""
def __call__(self):
"""Do nothing and just return itself."""
if hasattr(self, '_own_desc'):
issue_deprecation_warning('Calling {0}'.format(self._own_desc),
'it directly', 2)
return self
class SelfCallDict(SelfCallMixin, dict):
"""Dict with SelfCallMixin."""
class SelfCallString(SelfCallMixin, str):
"""Unicode string with SelfCallMixin."""
class DequeGenerator(collections.deque):
"""A generator that allows items to be added during generating."""
def __iter__(self):
"""Return the object which will be iterated."""
return self
def next(self):
"""Python 3 iterator method."""
if len(self):
return self.popleft()
else:
raise StopIteration
def __next__(self):
"""Python 3 iterator method."""
return self.next()
class ContextManagerWrapper(object):
"""
Wraps an object in a context manager.
    It redirects all access to the wrapped object and executes 'close' when
used as a context manager in with-statements. In such statements the value
set via 'as' is directly the wrapped object. For example:
wrapped = ContextManagerWrapper(an_object)
with wrapped as another_object:
assert(another_object is an_object)
It does not subclass the object though, so isinstance checks will fail
outside a with-statement.
"""
def __init__(self, wrapped):
"""Create a new wrapper."""
super(ContextManagerWrapper, self).__init__()
super(ContextManagerWrapper, self).__setattr__('_wrapped', wrapped)
def __enter__(self):
"""Enter a context manager and use the wrapped object directly."""
return self._wrapped
def __exit__(self, exc_type, exc_value, traceback):
"""Call close on the wrapped object when exiting a context manager."""
self._wrapped.close()
def __getattr__(self, name):
"""Get the attribute from the wrapped object."""
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
"""Set the attribute in the wrapped object."""
setattr(self._wrapped, name, value)
def open_compressed(filename, use_extension=False):
"""
Open a file and uncompress it if needed.
This function supports bzip2, gzip and 7zip as compression containers. It
uses the packages available in the standard library for bzip2 and gzip so
they are always available. 7zip is only available when a 7za program is
available.
The compression is either selected via the magic number or file ending.
@param filename: The filename.
@type filename: str
@param use_extension: Use the file extension instead of the magic number
to determine the type of compression (default False).
@type use_extension: bool
@raises ValueError: When 7za is not available.
@raises OSError: When it's not a 7z archive but the file extension is 7z.
It is also raised by bz2 when its content is invalid. gzip does not
immediately raise that error but only on reading it.
@return: A file-like object returning the uncompressed data in binary mode.
Before Python 2.7 the GzipFile object and before 2.7.1 the BZ2File are
wrapped in a ContextManagerWrapper with its advantages/disadvantages.
@rtype: file-like object
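    Example (an illustrative sketch, assuming 'dump.xml.bz2' is an existing
    bzip2 file):
        with open_compressed('dump.xml.bz2') as f:
            data = f.read()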
"""
def wrap(wrapped, sub_ver):
"""Wrap in a wrapper when this is below Python version 2.7."""
if PYTHON_VERSION < (2, 7, sub_ver):
return ContextManagerWrapper(wrapped)
else:
return wrapped
if use_extension:
# if '.' not in filename, it'll be 1 character long but otherwise
# contain the period
extension = filename[filename.rfind('.'):][1:]
else:
with open(filename, 'rb') as f:
magic_number = f.read(8)
if magic_number.startswith(b'BZh'):
extension = 'bz2'
elif magic_number.startswith(b'\x1F\x8B\x08'):
extension = 'gz'
elif magic_number.startswith(b"7z\xBC\xAF'\x1C"):
extension = '7z'
else:
extension = ''
if extension == 'bz2':
return wrap(bz2.BZ2File(filename), 1)
elif extension == 'gz':
return wrap(gzip.open(filename), 0)
elif extension == '7z':
try:
process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=65535)
except OSError:
raise ValueError('7za is not installed and can not '
'uncompress "{0}"'.format(filename))
else:
stderr = process.stderr.read()
process.stderr.close()
if b'Everything is Ok' not in stderr:
process.stdout.close()
# OSError is also raised when bz2 is invalid
raise OSError('Invalid 7z archive.')
else:
return process.stdout
else:
# assume it's an uncompressed XML file
return open(filename, 'rb')
def merge_unique_dicts(*args, **kwargs):
"""
    Return a merged dict, making sure that the original dicts had unique keys.
The positional arguments are the dictionaries to be merged. It is also
possible to define an additional dict using the keyword arguments.
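    For example (illustrative doctest):
    >>> merge_unique_dicts({'a': 1}, b=2) == {'a': 1, 'b': 2}
    True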
"""
args = list(args) + [dict(kwargs)]
conflicts = set()
result = {}
for arg in args:
conflicts |= set(arg.keys()) & set(result.keys())
result.update(arg)
if conflicts:
raise ValueError('Multiple dicts contain the same keys: '
'{0}'.format(', '.join(sorted(unicode(key) for key in conflicts))))
return result
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The following deprecators may support both syntaxes, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it callable. Therefore, a decorator must not accept
# only one arg, and that arg be a callable, as it will be detected as
# a deprecator without any arguments.
def signature(obj):
"""
Safely return function Signature object (PEP 362).
inspect.signature was introduced in 3.3, however backports are available.
In Python 3.3, it does not support all types of callables, and should
not be relied upon. Python 3.4 works correctly.
Any exception calling inspect.signature is ignored and None is returned.
@param obj: Function to inspect
@type obj: callable
    @rtype: inspect.Signature or None
"""
try:
return inspect.signature(obj)
except (AttributeError, ValueError):
return None
def add_decorated_full_name(obj, stacklevel=1):
"""Extract full object name, including class, and store in __full_name__.
This must be done on all decorators that are chained together, otherwise
the second decorator will have the wrong full name.
@param obj: A object being decorated
@type obj: object
@param stacklevel: level to use
@type stacklevel: int
"""
if hasattr(obj, '__full_name__'):
return
# The current frame is add_decorated_full_name
# The next frame is the decorator
# The next frame is the object being decorated
frame = sys._getframe(stacklevel + 1)
class_name = frame.f_code.co_name
if class_name and class_name != '<module>':
obj.__full_name__ = (obj.__module__ + '.' +
class_name + '.' +
obj.__name__)
else:
obj.__full_name__ = (obj.__module__ + '.' +
obj.__name__)
def manage_wrapping(wrapper, obj):
"""Add attributes to wrapper and wrapped functions."""
wrapper.__doc__ = obj.__doc__
wrapper.__name__ = obj.__name__
wrapper.__module__ = obj.__module__
wrapper.__signature__ = signature(obj)
if not hasattr(obj, '__full_name__'):
add_decorated_full_name(obj, 2)
wrapper.__full_name__ = obj.__full_name__
# Use the previous wrappers depth, if it exists
wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
# Obtain the wrapped object from the previous wrapper
wrapped = getattr(obj, '__wrapped__', obj)
wrapper.__wrapped__ = wrapped
# Increment the number of wrappers
if hasattr(wrapped, '__wrappers__'):
wrapped.__wrappers__ += 1
else:
wrapped.__wrappers__ = 1
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
        if the decorated decorator was called with arguments.
@param outer_args: args
@type outer_args: list
@param outer_kwargs: kwargs
@type outer_kwargs: dict
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = signature(obj)
# The decorator being decorated may have args, so both
# syntax need to be supported.
if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
def issue_deprecation_warning(name, instead, depth, warning_class=None):
"""Issue a deprecation warning."""
if instead:
if warning_class is None:
warning_class = DeprecationWarning
warn(u'{0} is deprecated; use {1} instead.'.format(name, instead),
warning_class, depth + 1)
else:
if warning_class is None:
warning_class = _NotImplementedWarning
warn('{0} is deprecated.'.format(name), warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
"""Decorator to output a deprecation warning.
@kwarg instead: if provided, will be used to specify the replacement
@type instead: string
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*args, **kwargs):
"""Replacement function.
@param args: args passed to the decorated function.
@type args: list
@param kwargs: kwargs passed to the decorated function.
@type kwargs: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
issue_deprecation_warning(name, instead, depth)
return obj(*args, **kwargs)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
return wrapper
without_parameters = len(args) == 1 and len(kwargs) == 0 and callable(args[0])
if 'instead' in kwargs:
instead = kwargs['instead']
elif not without_parameters and len(args) == 1:
instead = args[0]
else:
instead = False
# When called as @deprecated, return a replacement function
if without_parameters:
if not __debug__:
return args[0]
return decorator(args[0])
# Otherwise return a decorator, which returns a replacement function
else:
return decorator
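# Example (illustrative sketch):
#
#     @deprecated(instead='new_function')
#     def old_function():
#         pass
#
# Calling old_function() emits a DeprecationWarning that recommends
# new_function, then runs the original body.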
def deprecate_arg(old_arg, new_arg):
"""Decorator to declare old_arg deprecated and replace it with new_arg."""
return deprecated_args(**{old_arg: new_arg})
def deprecated_args(**arg_pairs):
"""
Decorator to declare multiple args deprecated.
    @param arg_pairs: Each entry points to the new argument name. With True or
        None it drops the value and prints a DeprecationWarning. If False it
        drops the value with only a PendingDeprecationWarning.
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
            @param __kw: kwargs passed to the decorated function
            @type __kw: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
for old_arg, new_arg in arg_pairs.items():
output_args = {
'name': name,
'old_arg': old_arg,
'new_arg': new_arg,
}
if old_arg in __kw:
if new_arg not in [True, False, None]:
if new_arg in __kw:
warn(u"%(new_arg)s argument of %(name)s "
u"replaces %(old_arg)s; cannot use both."
% output_args,
RuntimeWarning, depth)
else:
# If the value is positionally given this will
# cause a TypeError, which is intentional
warn(u"%(old_arg)s argument of %(name)s "
u"is deprecated; use %(new_arg)s instead."
% output_args,
DeprecationWarning, depth)
__kw[new_arg] = __kw[old_arg]
else:
if new_arg is False:
cls = PendingDeprecationWarning
else:
cls = DeprecationWarning
warn(u"%(old_arg)s argument of %(name)s is deprecated."
% output_args,
cls, depth)
del __kw[old_arg]
return obj(*__args, **__kw)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
if wrapper.__signature__:
# Build a new signature with deprecated args added.
# __signature__ is only available in Python 3 which has OrderedDict
params = OrderedDict()
for param in wrapper.__signature__.parameters.values():
params[param.name] = param.replace()
for old_arg, new_arg in arg_pairs.items():
params[old_arg] = inspect.Parameter(
                    old_arg, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default='[deprecated name of ' + new_arg + ']'
if new_arg not in [True, False, None]
else NotImplemented)
wrapper.__signature__ = inspect.Signature()
wrapper.__signature__._parameters = params
return wrapper
return decorator
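# Example (illustrative sketch):
#
#     @deprecated_args(old='new')
#     def func(new):
#         return new
#
# func(old=1) warns that 'old' is deprecated and forwards the value to
# 'new', so the call returns 1.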
def remove_last_args(arg_names):
"""
Decorator to declare all args additionally provided deprecated.
All positional arguments appearing after the normal arguments are marked
deprecated. It marks also all keyword arguments present in arg_names as
deprecated. Any arguments (positional or keyword) which are not present in
    arg_names are forwarded. For example, a call with 3 positional parameters
    to a function that accepts one, with one name in arg_names, will result
    in an error, because the function still gets called with 2 parameters.
The decorated function may not use *args or **kwargs.
@param arg_names: The names of all arguments.
@type arg_names: iterable; for the most explanatory message it should
retain the given order (so not a set for example).
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
            @param __kw: kwargs passed to the decorated function
            @type __kw: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
args, varargs, kwargs, _ = inspect.getargspec(wrapper.__wrapped__)
if varargs is not None and kwargs is not None:
                raise ValueError(u'{0} may not have * or ** args.'.format(
                    name))
deprecated = set(__kw) & set(arg_names)
if len(__args) > len(args):
deprecated.update(arg_names[:len(__args) - len(args)])
# remove at most |arg_names| entries from the back
new_args = tuple(__args[:max(len(args), len(__args) - len(arg_names))])
new_kwargs = dict((arg, val) for arg, val in __kw.items()
if arg not in arg_names)
if deprecated:
# sort them according to arg_names
deprecated = [arg for arg in arg_names if arg in deprecated]
warn(u"The trailing arguments ('{0}') of {1} are deprecated. "
u"The value(s) provided for '{2}' have been dropped.".
format("', '".join(arg_names),
name,
"', '".join(deprecated)),
DeprecationWarning, depth)
return obj(*new_args, **new_kwargs)
manage_wrapping(wrapper, obj)
return wrapper
return decorator
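# Example (illustrative sketch):
#
#     @remove_last_args(['limit'])
#     def func(name):
#         return name
#
# func('x', 5) warns that the trailing 'limit' value is dropped and
# returns 'x'.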
def redirect_func(target, source_module=None, target_module=None,
old_name=None, class_name=None):
"""
Return a function which can be used to redirect to 'target'.
It also acts like marking that function deprecated and copies all
parameters.
@param target: The targeted function which is to be executed.
@type target: callable
@param source_module: The module of the old function. If '.' defaults
to target_module. If 'None' (default) it tries to guess it from the
executing function.
@type source_module: basestring
@param target_module: The module of the target function. If
'None' (default) it tries to get it from the target. Might not work
with nested classes.
@type target_module: basestring
@param old_name: The old function name. If None it uses the name of the
new function.
@type old_name: basestring
@param class_name: The name of the class. It's added to the target and
source module (separated by a '.').
@type class_name: basestring
@return: A new function which adds a warning prior to each execution.
@rtype: callable
"""
def call(*a, **kw):
issue_deprecation_warning(old_name, new_name, 2)
return target(*a, **kw)
if target_module is None:
target_module = target.__module__
if target_module and target_module[-1] != '.':
target_module += '.'
    if source_module == '.':
source_module = target_module
elif source_module and source_module[-1] != '.':
source_module += '.'
else:
source_module = sys._getframe(1).f_globals['__name__'] + '.'
if class_name:
target_module += class_name + '.'
source_module += class_name + '.'
old_name = source_module + (old_name or target.__name__)
new_name = target_module + target.__name__
if not __debug__:
return target
return call
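# Example (illustrative sketch, assuming new_func is defined elsewhere):
#
#     old_func = redirect_func(new_func, old_name='old_func')
#
# Calling old_func(...) issues a deprecation warning naming the old and
# new locations, then forwards all arguments to new_func.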
class ModuleDeprecationWrapper(types.ModuleType):
"""A wrapper for a module to deprecate classes or variables of it."""
def __init__(self, module):
"""
Initialise the wrapper.
It will automatically overwrite the module with this instance in
C{sys.modules}.
@param module: The module name or instance
@type module: str or module
"""
if isinstance(module, basestring):
module = sys.modules[module]
super(ModuleDeprecationWrapper, self).__setattr__('_deprecated', {})
super(ModuleDeprecationWrapper, self).__setattr__('_module', module)
super(ModuleDeprecationWrapper, self).__setattr__('__doc__', module.__doc__)
if __debug__:
sys.modules[module.__name__] = self
def _add_deprecated_attr(self, name, replacement=None,
replacement_name=None, warning_message=None):
"""
Add the name to the local deprecated names dict.
@param name: The name of the deprecated class or variable. It may not
be already deprecated.
@type name: str
@param replacement: The replacement value which should be returned
instead. If the name is already an attribute of that module this
must be None. If None it'll return the attribute of the module.
@type replacement: any
@param replacement_name: The name of the new replaced value. Required
if C{replacement} is not None and it has no __name__ attribute.
@type replacement_name: str
@param warning_message: The warning to display, with positional
variables: {0} = module, {1} = attribute name, {2} = replacement.
@type warning_message: basestring
"""
if '.' in name:
raise ValueError('Deprecated name "{0}" may not contain '
'".".'.format(name))
if name in self._deprecated:
raise ValueError('Name "{0}" is already deprecated.'.format(name))
if replacement is not None and hasattr(self._module, name):
raise ValueError('Module has already an attribute named '
'"{0}".'.format(name))
if replacement_name is None:
if hasattr(replacement, '__name__'):
replacement_name = replacement.__module__
if hasattr(replacement, '__self__'):
replacement_name += '.'
replacement_name += replacement.__self__.__class__.__name__
replacement_name += '.' + replacement.__name__
else:
raise TypeError('Replacement must have a __name__ attribute '
'or a replacement name must be set '
'specifically.')
if not warning_message:
if replacement_name:
warning_message = '{0}.{1} is deprecated; use {2} instead.'
else:
warning_message = u"{0}.{1} is deprecated."
self._deprecated[name] = replacement_name, replacement, warning_message
def __setattr__(self, attr, value):
"""Set the value of the wrapped module."""
setattr(self._module, attr, value)
def __getattr__(self, attr):
"""Return the attribute with a deprecation warning if required."""
if attr in self._deprecated:
warning_message = self._deprecated[attr][2]
warn(warning_message.format(self._module.__name__, attr,
self._deprecated[attr][0]),
DeprecationWarning, 2)
if self._deprecated[attr][1]:
return self._deprecated[attr][1]
return getattr(self._module, attr)
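# Example (illustrative sketch of the intended usage at module level):
#
#     wrapper = ModuleDeprecationWrapper(__name__)
#     wrapper._add_deprecated_attr('OldThing', NewThing)
#
# Accessing <module>.OldThing afterwards emits a DeprecationWarning and
# returns NewThing.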
| 33.614083 | 96 | 0.60659 |
aced6cb583f0b3780559ba41e25be0f13e9eeb9a | 4,374 | py | Python | homeassistant/components/iaqualink/climate.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1 | 2022-02-21T05:50:41.000Z | 2022-02-21T05:50:41.000Z | homeassistant/components/iaqualink/climate.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 25 | 2021-10-02T10:01:14.000Z | 2022-03-31T06:11:49.000Z | homeassistant/components/iaqualink/climate.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1 | 2021-12-10T10:33:28.000Z | 2021-12-10T10:33:28.000Z | """Support for Aqualink Thermostats."""
from __future__ import annotations
import logging
from iaqualink.const import (
AQUALINK_TEMP_CELSIUS_HIGH,
AQUALINK_TEMP_CELSIUS_LOW,
AQUALINK_TEMP_FAHRENHEIT_HIGH,
AQUALINK_TEMP_FAHRENHEIT_LOW,
)
from iaqualink.device import AqualinkHeater, AqualinkPump, AqualinkSensor, AqualinkState
from homeassistant.components.climate import ClimateEntity, ClimateEntityFeature
from homeassistant.components.climate.const import DOMAIN, HVAC_MODE_HEAT, HVAC_MODE_OFF
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import AqualinkEntity, refresh_system
from .const import CLIMATE_SUPPORTED_MODES, DOMAIN as AQUALINK_DOMAIN
from .utils import await_or_reraise
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up discovered switches."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkThermostat(dev))
async_add_entities(devs, True)
class HassAqualinkThermostat(AqualinkEntity, ClimateEntity):
"""Representation of a thermostat."""
_attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
@property
def name(self) -> str:
"""Return the name of the thermostat."""
return self.dev.label.split(" ")[0]
@property
def hvac_modes(self) -> list[str]:
"""Return the list of supported HVAC modes."""
return CLIMATE_SUPPORTED_MODES
@property
def pump(self) -> AqualinkPump:
"""Return the pump device for the current thermostat."""
pump = f"{self.name.lower()}_pump"
return self.dev.system.devices[pump]
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode."""
state = AqualinkState(self.heater.state)
if state == AqualinkState.ON:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@refresh_system
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Turn the underlying heater switch on or off."""
if hvac_mode == HVAC_MODE_HEAT:
await await_or_reraise(self.heater.turn_on())
elif hvac_mode == HVAC_MODE_OFF:
await await_or_reraise(self.heater.turn_off())
else:
_LOGGER.warning("Unknown operation mode: %s", hvac_mode)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self.dev.system.temp_unit == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def min_temp(self) -> int:
"""Return the minimum temperature supported by the thermostat."""
if self.temperature_unit == TEMP_FAHRENHEIT:
return AQUALINK_TEMP_FAHRENHEIT_LOW
return AQUALINK_TEMP_CELSIUS_LOW
@property
def max_temp(self) -> int:
"""Return the minimum temperature supported by the thermostat."""
if self.temperature_unit == TEMP_FAHRENHEIT:
return AQUALINK_TEMP_FAHRENHEIT_HIGH
return AQUALINK_TEMP_CELSIUS_HIGH
@property
def target_temperature(self) -> float:
"""Return the current target temperature."""
return float(self.dev.state)
@refresh_system
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await await_or_reraise(self.dev.set_temperature(int(kwargs[ATTR_TEMPERATURE])))
@property
def sensor(self) -> AqualinkSensor:
"""Return the sensor device for the current thermostat."""
sensor = f"{self.name.lower()}_temp"
return self.dev.system.devices[sensor]
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
if self.sensor.state != "":
return float(self.sensor.state)
return None
@property
def heater(self) -> AqualinkHeater:
"""Return the heater device for the current thermostat."""
heater = f"{self.name.lower()}_heater"
return self.dev.system.devices[heater]
| 33.646154 | 88 | 0.69593 |
aced6d736928b34c35b2a8ed09a38b678d65bf3f | 2,080 | py | Python | impala/tests/test_impala.py | wzhou-code/impyla | 2ff9bf757e3ea6061007470e8cdb0f1a492fbf42 | [
"Apache-2.0"
] | 661 | 2015-01-08T08:33:43.000Z | 2022-03-28T06:29:15.000Z | impala/tests/test_impala.py | wzhou-code/impyla | 2ff9bf757e3ea6061007470e8cdb0f1a492fbf42 | [
"Apache-2.0"
] | 389 | 2015-01-13T14:58:47.000Z | 2022-03-29T14:45:17.000Z | impala/tests/test_impala.py | wzhou-code/impyla | 2ff9bf757e3ea6061007470e8cdb0f1a492fbf42 | [
"Apache-2.0"
] | 262 | 2015-01-24T02:29:47.000Z | 2022-03-03T19:13:07.000Z | # Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pytest
from impala.compat import _xrange as xrange
from pytest import yield_fixture
BIGGER_TABLE_NUM_ROWS = 100
@yield_fixture(scope='module')
def bigger_table(cur):
table_name = 'tmp_bigger_table'
ddl = """CREATE TABLE {0} (s string)
STORED AS PARQUET""".format(table_name)
cur.execute(ddl)
dml = """INSERT INTO {0}
VALUES {1}""".format(table_name,
",".join(["('row{0}')".format(i) for i in xrange(BIGGER_TABLE_NUM_ROWS)]))
# Disable codegen and expr rewrites so query runs faster.
cur.execute("set disable_codegen=1")
cur.execute("set enable_expr_rewrites=0")
cur.execute(dml)
try:
yield table_name
finally:
cur.execute("DROP TABLE {0}".format(table_name))
def test_has_more_rows(cur, bigger_table):
"""Test that impyla correctly handles empty row batches returned with the
hasMoreRows flag."""
# Set the fetch timeout very low and add sleeps so that Impala will return
# empty batches. Run on a single node with a single thread to make as predictable
# as possible.
cur.execute("set fetch_rows_timeout_ms=1")
cur.execute("set num_nodes=1")
cur.execute("set mt_dop=1")
cur.execute("""select *
from {0}
where s != cast(sleep(2) as string)""".format(bigger_table))
expected_rows = [("row{0}".format(i),) for i in xrange(BIGGER_TABLE_NUM_ROWS)]
assert sorted(cur.fetchall()) == sorted(expected_rows)
| 37.818182 | 91 | 0.690865 |
aced6e531b5a4a9418ea9e8d9b8ff9a901b42d66 | 10,307 | py | Python | tests/dashboard/test_discounts.py | DowneyTung/saleor | 50f299d8e276b594753ee439d9e1a212f85a91b1 | [
"CC-BY-4.0"
] | 19 | 2019-12-03T17:28:07.000Z | 2021-09-10T21:30:52.000Z | tests/dashboard/test_discounts.py | DowneyTung/saleor | 50f299d8e276b594753ee439d9e1a212f85a91b1 | [
"CC-BY-4.0"
] | 32 | 2019-12-16T11:18:35.000Z | 2021-03-19T03:33:15.000Z | tests/dashboard/test_discounts.py | DowneyTung/saleor | 50f299d8e276b594753ee439d9e1a212f85a91b1 | [
"CC-BY-4.0"
] | 20 | 2020-02-03T00:38:59.000Z | 2022-01-03T13:07:52.000Z | import json
from decimal import Decimal
from unittest.mock import Mock
import pytest
from django.urls import reverse
from prices import Money, TaxedMoney
from saleor.dashboard.order.utils import get_voucher_discount_for_order
from saleor.discount import DiscountValueType, VoucherType
from saleor.discount.models import NotApplicable, Sale, Voucher
from saleor.product.models import Collection
def test_sales_list(admin_client):
url = reverse("dashboard:sale-list")
response = admin_client.get(url)
assert response.status_code == 200
def test_vouchers_list(admin_client):
url = reverse("dashboard:voucher-list")
response = admin_client.get(url)
assert response.status_code == 200
def test_voucher_shipping_add(admin_client):
assert Voucher.objects.count() == 0
url = reverse("dashboard:voucher-add")
data = {
"code": "TESTVOUCHER",
"name": "Test Voucher",
"start_date": "2018-01-01",
"end_date": "2018-06-01",
"type": VoucherType.SHIPPING,
"discount_value": "15.99",
"discount_value_type": DiscountValueType.FIXED,
"shipping-min_spent_0": "59.99",
"shipping-min_spent_1": "USD",
}
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert Voucher.objects.count() == 1
voucher = Voucher.objects.all()[0]
assert voucher.type == VoucherType.SHIPPING
assert voucher.code == data["code"]
assert voucher.name == data["name"]
assert voucher.start_date.isoformat() == "2018-01-01T06:00:00+00:00"
assert voucher.end_date.isoformat() == "2018-06-01T05:00:00+00:00"
assert voucher.discount_value_type == DiscountValueType.FIXED
assert voucher.discount_value == Decimal("15.99")
assert voucher.min_spent == Money("59.99", "USD")
def test_view_sale_add(admin_client, category, collection):
url = reverse("dashboard:sale-add")
data = {
"name": "Free products",
"type": DiscountValueType.PERCENTAGE,
"value": 100,
"categories": [category.id],
"collections": [collection.id],
"start_date": "2018-01-01",
}
response = admin_client.post(url, data)
assert response.status_code == 302
assert Sale.objects.count() == 1
sale = Sale.objects.first()
assert sale.name == data["name"]
assert category in sale.categories.all()
assert collection in sale.collections.all()
def test_view_sale_update_invalid_values(admin_client, sale, collection):
url = reverse("dashboard:sale-update", kwargs={"pk": sale.pk})
data = {
"name": sale.name,
"categories": [],
"collections": [],
"start_date": "2018-01-01",
}
response = admin_client.post(url, data)
assert response.status_code == 200
form_ctx = response.context["form"]
assert form_ctx.errors == {
"type": ["This field is required."],
"value": ["This field is required."],
}
def test_view_sale_add_requires_product_category_or_collection(
admin_client, category, product, collection
):
initial_sales_count = Sale.objects.count()
url = reverse("dashboard:sale-add")
data = {
"name": "Free products",
"type": DiscountValueType.PERCENTAGE,
"value": 100,
"start_date": "2018-01-01",
}
response = admin_client.post(url, data)
assert response.status_code == 200
assert Sale.objects.count() == initial_sales_count
products_data = [
{"categories": [category.id]},
{"products": [product.id]},
{"collections": [collection.pk]},
]
for count, proper_data in enumerate(products_data):
proper_data.update(data)
response = admin_client.post(url, proper_data)
assert response.status_code == 302
assert Sale.objects.count() == 1 + initial_sales_count + count
@pytest.mark.parametrize(
"subtotal, discount_value, discount_type, min_spent_amount, expected_value",
[
("100", 10, DiscountValueType.FIXED, None, 10),
("100.05", 10, DiscountValueType.PERCENTAGE, 100, 10),
],
)
def test_value_voucher_order_discount(
subtotal, discount_value, discount_type, min_spent_amount, expected_value
):
voucher = Voucher(
code="unique",
type=VoucherType.ENTIRE_ORDER,
discount_value_type=discount_type,
discount_value=discount_value,
min_spent=Money(min_spent_amount, "USD")
if min_spent_amount is not None
else None,
)
subtotal = Money(subtotal, "USD")
subtotal = TaxedMoney(net=subtotal, gross=subtotal)
order = Mock(get_subtotal=Mock(return_value=subtotal), voucher=voucher)
discount = get_voucher_discount_for_order(order)
assert discount == Money(expected_value, "USD")
@pytest.mark.parametrize(
"shipping_cost, discount_value, discount_type, expected_value",
[(10, 50, DiscountValueType.PERCENTAGE, 5), (10, 20, DiscountValueType.FIXED, 10)],
)
def test_shipping_voucher_order_discount(
shipping_cost, discount_value, discount_type, expected_value
):
voucher = Voucher(
code="unique",
type=VoucherType.SHIPPING,
discount_value_type=discount_type,
discount_value=discount_value,
min_spent_amount=None,
)
subtotal = Money(100, "USD")
subtotal = TaxedMoney(net=subtotal, gross=subtotal)
shipping_total = Money(shipping_cost, "USD")
order = Mock(
get_subtotal=Mock(return_value=subtotal),
shipping_price=shipping_total,
voucher=voucher,
)
discount = get_voucher_discount_for_order(order)
assert discount == Money(expected_value, "USD")
@pytest.mark.parametrize(
"total, total_quantity, min_spent_amount, min_checkout_items_quantity,"
"voucher_type",
[
(99, 10, 100, 10, VoucherType.SHIPPING),
(100, 9, 100, 10, VoucherType.SHIPPING),
(99, 9, 100, 10, VoucherType.SHIPPING),
(99, 10, 100, 10, VoucherType.ENTIRE_ORDER),
(100, 9, 100, 10, VoucherType.ENTIRE_ORDER),
(99, 9, 100, 10, VoucherType.ENTIRE_ORDER),
(99, 10, 100, 10, VoucherType.SPECIFIC_PRODUCT),
(100, 9, 100, 10, VoucherType.SPECIFIC_PRODUCT),
(99, 9, 100, 10, VoucherType.SPECIFIC_PRODUCT),
],
)
def test_shipping_voucher_checkout_discount_not_applicable_returns_zero(
total, total_quantity, min_spent_amount, min_checkout_items_quantity, voucher_type
):
voucher = Voucher(
code="unique",
type=voucher_type,
discount_value_type=DiscountValueType.FIXED,
discount_value=10,
min_spent=(
Money(min_spent_amount, "USD") if min_spent_amount is not None else None
),
min_checkout_items_quantity=min_checkout_items_quantity,
)
price = Money(total, "USD")
price = TaxedMoney(net=price, gross=price)
order = Mock(
get_subtotal=Mock(return_value=price),
get_total_quantity=Mock(return_value=total_quantity),
shipping_price=price,
voucher=voucher,
)
with pytest.raises(NotApplicable):
get_voucher_discount_for_order(order)
def test_product_voucher_checkout_discount_raises_not_applicable(
order_with_lines, product_with_images
):
discounted_product = product_with_images
voucher = Voucher(
code="unique",
type=VoucherType.SPECIFIC_PRODUCT,
discount_value_type=DiscountValueType.FIXED,
discount_value=10,
)
voucher.save()
voucher.products.add(discounted_product)
order_with_lines.voucher = voucher
order_with_lines.save()
# Offer is valid only for products listed in voucher
with pytest.raises(NotApplicable):
get_voucher_discount_for_order(order_with_lines)
def test_category_voucher_checkout_discount_raises_not_applicable(order_with_lines):
discounted_collection = Collection.objects.create(name="Discounted", slug="discou")
voucher = Voucher(
code="unique",
type=VoucherType.SPECIFIC_PRODUCT,
discount_value_type=DiscountValueType.FIXED,
discount_value=10,
)
voucher.save()
voucher.collections.add(discounted_collection)
order_with_lines.voucher = voucher
order_with_lines.save()
# Discount should be valid only for items in the discounted collections
with pytest.raises(NotApplicable):
get_voucher_discount_for_order(order_with_lines)
def test_ajax_voucher_list(admin_client, voucher):
voucher.name = "Summer sale"
voucher.save()
vouchers_list = [{"id": voucher.pk, "text": str(voucher)}]
url = reverse("dashboard:ajax-vouchers")
response = admin_client.get(url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
resp_decoded = json.loads(response.content.decode("utf-8"))
assert response.status_code == 200
assert resp_decoded == {"results": vouchers_list}
@pytest.mark.parametrize(
"voucher_type", ["specific_product", "entire_order", "shipping"]
)
def test_voucher_form_min_spent_amount_is_changed_on_edit(
admin_client, product, collection, voucher_type
):
assert Voucher.objects.count() == 0
url = reverse("dashboard:voucher-add")
data = {
"code": "TESTVOUCHER",
"name": "Test Voucher",
"start_date": "2019-01-01",
"end_date": "2019-06-01",
"type": voucher_type,
"discount_value": "15.99",
"discount_value_type": DiscountValueType.FIXED,
"product-products": [product.pk],
"category-categories": [product.category.pk],
"collection-collections": [collection.pk],
}
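    # The min_spent money field is submitted as two sub-fields: `_0` carries
    # the amount and `_1` the currency code.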
data["{}-min_spent_0".format(voucher_type)] = "800"
data["{}-min_spent_1".format(voucher_type)] = "USD"
response = admin_client.post(url, data, follow=True)
assert response.status_code == 200
assert Voucher.objects.count() == 1
voucher = Voucher.objects.all()[0]
assert voucher.type == voucher_type
assert voucher.code == data["code"]
assert voucher.name == data["name"]
assert voucher.start_date.isoformat() == "2019-01-01T06:00:00+00:00"
assert voucher.end_date.isoformat() == "2019-06-01T05:00:00+00:00"
assert voucher.discount_value_type == DiscountValueType.FIXED
assert voucher.discount_value == Decimal("15.99")
assert voucher.min_spent == Money(800, "USD")
| 34.016502 | 87 | 0.684001 |
aced6e79a739ac1db9d8568fab8a479425e1fa68 | 1,686 | py | Python | setup.py | drodel/tronpy | 9e869fb508c3f77b432948cc56cd66985375843d | ["MIT"] | 87 | 2020-08-24T08:46:37.000Z | 2022-03-28T02:50:56.000Z | setup.py | drodel/tronpy | 9e869fb508c3f77b432948cc56cd66985375843d | ["MIT"] | 40 | 2020-07-17T11:15:46.000Z | 2022-03-30T01:02:16.000Z | setup.py | drodel/tronpy | 9e869fb508c3f77b432948cc56cd66985375843d | ["MIT"] | 50 | 2020-08-04T15:10:37.000Z | 2022-03-29T02:03:38.000Z | # -*- coding: utf-8 -*-
from setuptools import setup
packages = ['tronpy', 'tronpy.keys', 'tronpy.providers']
package_data = {'': ['*']}
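# Runtime dependencies: bounded ranges for most packages, an exact pin for httpx.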
install_requires = [
'base58>=2.0.0,<3.0.0',
'ecdsa>=0.15,<0.16',
'eth_abi>=2.1.1,<3.0.0',
'pycryptodome>=3.9.7,<4.0.0',
'requests>=2.23.0,<3.0.0',
'httpx==0.16.1',
]
setup_kwargs = {
'name': 'tronpy',
'version': '0.2.6',
'description': 'TRON Python client library',
'long_description': '# tronpy\n\nTRON Python Client Library.\n\n## How to use\n\n```python\nfrom tronpy import Tron\n\nclient = Tron(network=\'nile\')\n# Private key of TJzXt1sZautjqXnpjQT4xSCBHNSYgBkDr3\npriv_key = PrivateKey(bytes.fromhex("8888888888888888888888888888888888888888888888888888888888888888"))\n\ntxn = (\n client.trx.transfer("TJzXt1sZautjqXnpjQT4xSCBHNSYgBkDr3", "TVjsyZ7fYF3qLF6BQgPmTEZy1xrNNyVAAA", 1_000)\n .memo("test memo")\n .fee_limit(100_000_000)\n .build()\n .inspect()\n .sign(priv_key)\n .broadcast()\n)\n\nprint(txn)\n# > {\'result\': True, \'txid\': \'5182b96bc0d74f416d6ba8e22380e5920d8627f8fb5ef5a6a11d4df030459132\'}\nprint(txn.wait())\n# > {\'id\': \'5182b96bc0d74f416d6ba8e22380e5920d8627f8fb5ef5a6a11d4df030459132\', \'blockNumber\': 6415370, \'blockTimeStamp\': 1591951155000, \'contractResult\': [\'\'], \'receipt\': {\'net_usage\': 283}}\n```\n',
'author': 'andelf',
'author_email': 'andelf@gmail.com',
'maintainer': None,
'maintainer_email': None,
'url': 'https://github.com/andelf/tronpy',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
| 48.171429 | 911 | 0.666074 |
aced6ef48bd473e77b727dd325846178f2bca13a | 879 | py | Python | pancan-analyser.py | sykhro/cancol-analysis | da1fe0f79c56a6d27057ffbadc05136e1278d520 | ["MIT"] | null | null | null | pancan-analyser.py | sykhro/cancol-analysis | da1fe0f79c56a6d27057ffbadc05136e1278d520 | ["MIT"] | null | null | null | pancan-analyser.py | sykhro/cancol-analysis | da1fe0f79c56a6d27057ffbadc05136e1278d520 | ["MIT"] | null | null | null | #!/usr/bin/env python
import logging as log
import os
import networkx as nx
import pandas as pd
import analysis_nx as anx
import pathways_nx as pnx
PATHWAYS_DIRECTORY = "./pathways/"
log.basicConfig(level=log.INFO)
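# Parse every pathway file in the directory into a networkx graph.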
pathways = [
pnx.pathway_to_nx(PATHWAYS_DIRECTORY + pw) for pw in os.listdir(PATHWAYS_DIRECTORY)
]
pathways.sort(key=lambda pw: pw.name)
log.info(f"Loaded {len(pathways)} pathways from {PATHWAYS_DIRECTORY}")
patients_log = pd.read_csv("TRIBE2_db.csv")
log.info(f"Loaded {len(patients_log)} patients")
mutations_data = pd.read_csv("TRIBE2_seq_res.csv")
log.info("Loaded gene mutations")
log.debug(f"Got {len(mutations_data)}")
arm0_df_indeg = anx.process_patients_with_f(
patients_log[patients_log["arm"] == 0]["PatientFirstName"],
nx.in_degree_centrality,
pathways,
mutations_data,
)
print(arm0_df_indeg.describe())
log.info("Goodbye")
| 24.416667 | 87 | 0.758817 |
aced6ef8a688efe544dd83360cbf1a76483d54e6 | 1,954 | py | Python | gefest/core/opt/operators/crossover.py | DenisSidoren/GEFEST | 12752293138b5c12caa99c8adc3fd13a9fb4c2f0 | ["BSD-3-Clause"] | 1 | 2022-01-19T11:40:20.000Z | 2022-01-19T11:40:20.000Z | gefest/core/opt/operators/crossover.py | DenisSidoren/GEFEST | 12752293138b5c12caa99c8adc3fd13a9fb4c2f0 | ["BSD-3-Clause"] | null | null | null | gefest/core/opt/operators/crossover.py | DenisSidoren/GEFEST | 12752293138b5c12caa99c8adc3fd13a9fb4c2f0 | ["BSD-3-Clause"] | null | null | null | import copy
import random
from multiprocessing import Pool
from gefest.core.algs.postproc.resolve_errors import postprocess
from gefest.core.opt.constraints import check_constraints
from gefest.core.structure.domain import Domain
from gefest.core.structure.structure import Structure
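# Retry budget for producing a valid offspring, and the number of parallel
# crossover attempts made per retry.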
MAX_ITER = 50000
NUM_PROC = 1
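# One single-point crossover attempt: splice the head of s1's polygon list
# onto the tail of s2's, repair the result, and return it only if it passes
# the lightweight domain-constraint check (otherwise None).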
def crossover_worker(args):
s1, s2, domain = args[0], args[1], args[2]
new_structure = copy.deepcopy(s1)
crossover_point = random.randint(1, len(new_structure.polygons))
part_1 = s1.polygons[0:crossover_point]
if not isinstance(part_1, list):
part_1 = [part_1]
part_2 = s2.polygons[crossover_point:len(s1.polygons)]
if not isinstance(part_2, list):
part_2 = [part_2]
result = copy.deepcopy(part_1)
result.extend(copy.deepcopy(part_2))
new_structure.polygons = result
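    # Repair geometry errors in the offspring, then validate it against the
    # domain constraints.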
new_structure = postprocess(new_structure, domain)
is_correct = check_constraints(new_structure, is_lightweight=True, domain=domain)
if not is_correct:
return None
return new_structure
def crossover(s1: Structure, s2: Structure, domain: Domain, rate=0.4):
random_val = random.random()
if random_val >= rate or len(s1.polygons) == 1 or len(s2.polygons) == 1:
if random.random() > 0.5:
return s1
else:
return s2
is_correct = False
n_iter = 0
new_structure = s1
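    # Keep attempting crossovers (in parallel when NUM_PROC > 1) until a valid
    # offspring appears or the retry budget runs out; otherwise fall back to s1.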
while not is_correct and n_iter < MAX_ITER:
n_iter += 1
print('cross', n_iter)
if NUM_PROC > 1:
with Pool(NUM_PROC) as p:
new_items = p.map(crossover_worker,
[[s1, s2, domain] for _ in range(NUM_PROC)])
else:
new_items = [crossover_worker([s1, s2, domain]) for _ in range(NUM_PROC)]
for structure in new_items:
if structure is not None:
new_structure = structure
break
return new_structure
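# Minimal usage sketch (illustrative only; `parent_a`, `parent_b` and `domain`
# are assumed to come from the caller, e.g. a GEFEST optimisation loop, and
# are not constructed here):
#
#     child = crossover(parent_a, parent_b, domain, rate=0.4)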
| 27.914286 | 85 | 0.651484 |