Dataset columns (one record per source file):
- hexsha: stringlengths 40 to 40
- size: int64, 4 to 1.02M
- ext: stringclasses, 8 values
- lang: stringclasses, 1 value
- max_stars_repo_path: stringlengths 4 to 209
- max_stars_repo_name: stringlengths 5 to 121
- max_stars_repo_head_hexsha: stringlengths 40 to 40
- max_stars_repo_licenses: listlengths 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: stringlengths 24 to 24, nullable
- max_stars_repo_stars_event_max_datetime: stringlengths 24 to 24, nullable
- max_issues_repo_path: stringlengths 4 to 209
- max_issues_repo_name: stringlengths 5 to 121
- max_issues_repo_head_hexsha: stringlengths 40 to 40
- max_issues_repo_licenses: listlengths 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: stringlengths 24 to 24, nullable
- max_issues_repo_issues_event_max_datetime: stringlengths 24 to 24, nullable
- max_forks_repo_path: stringlengths 4 to 209
- max_forks_repo_name: stringlengths 5 to 121
- max_forks_repo_head_hexsha: stringlengths 40 to 40
- max_forks_repo_licenses: listlengths 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: stringlengths 24 to 24, nullable
- max_forks_repo_forks_event_max_datetime: stringlengths 24 to 24, nullable
- content: stringlengths 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
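Each record pairs repository metadata (paths, repo names, head hexshas, licenses, star/issue/fork counts and event dates) with the raw file content and simple text statistics. As a rough sketch of how such records could be inspected, assuming the rows were saved locally as a Parquet file (the filename and format are assumptions, not part of the dump):

import pandas as pd

# Hypothetical local copy of the records; the filename and Parquet format are assumptions.
df = pd.read_parquet("code_records.parquet")

row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"])  # repo and file path
print(row["max_stars_count"], row["max_stars_repo_licenses"])  # popularity and licenses
print(row["avg_line_length"], row["alphanum_fraction"])        # simple text statistics
print(row["content"][:200])                                    # first 200 characters of the file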
hexsha: 1746c7e6fc020333e5401ccb9e02b20ad402ff65 | size: 7,313 | ext: py | lang: Python
path: tests/helpers/test_sun.py | repo: petewill/home-assistant @ 5859dba4344f05fb8774aa1207e47ac28f627a67 | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: 3 (2020-05-18T10:18:16.000Z to 2020-12-08T11:27:55.000Z) | issues: 39 (2016-12-16T12:40:34.000Z to 2017-02-13T17:53:42.000Z) | forks: 6 (2020-04-10T06:21:11.000Z to 2021-07-01T08:53:38.000Z)
"""The tests for the Sun helpers."""
# pylint: disable=protected-access
from unittest.mock import patch
from datetime import timedelta, datetime
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
import homeassistant.util.dt as dt_util
import homeassistant.helpers.sun as sun
def test_next_events(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
mod = -1
while True:
next_dawn = astral.dawn_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dawn > utc_now:
break
mod += 1
mod = -1
while True:
next_dusk = astral.dusk_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dusk > utc_now:
break
mod += 1
mod = -1
while True:
next_midnight = astral.solar_midnight_utc(
utc_today + timedelta(days=mod), longitude
)
if next_midnight > utc_now:
break
mod += 1
mod = -1
while True:
next_noon = astral.solar_noon_utc(utc_today + timedelta(days=mod), longitude)
if next_noon > utc_now:
break
mod += 1
mod = -1
while True:
next_rising = astral.sunrise_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_rising > utc_now:
break
mod += 1
mod = -1
while True:
next_setting = astral.sunset_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_setting > utc_now:
break
mod += 1
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert next_dawn == sun.get_astral_event_next(hass, "dawn")
assert next_dusk == sun.get_astral_event_next(hass, "dusk")
assert next_midnight == sun.get_astral_event_next(hass, "solar_midnight")
assert next_noon == sun.get_astral_event_next(hass, "solar_noon")
assert next_rising == sun.get_astral_event_next(hass, SUN_EVENT_SUNRISE)
assert next_setting == sun.get_astral_event_next(hass, SUN_EVENT_SUNSET)
def test_date_events(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_today)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_today)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_today)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_today)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_today)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_today)
def test_date_events_default_date(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
with patch("homeassistant.util.dt.now", return_value=utc_now):
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_today)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_today)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_today)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_today)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_today)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_today)
def test_date_events_accepts_datetime(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
dawn = astral.dawn_utc(utc_today, latitude, longitude)
dusk = astral.dusk_utc(utc_today, latitude, longitude)
midnight = astral.solar_midnight_utc(utc_today, longitude)
noon = astral.solar_noon_utc(utc_today, longitude)
sunrise = astral.sunrise_utc(utc_today, latitude, longitude)
sunset = astral.sunset_utc(utc_today, latitude, longitude)
assert dawn == sun.get_astral_event_date(hass, "dawn", utc_now)
assert dusk == sun.get_astral_event_date(hass, "dusk", utc_now)
assert midnight == sun.get_astral_event_date(hass, "solar_midnight", utc_now)
assert noon == sun.get_astral_event_date(hass, "solar_noon", utc_now)
assert sunrise == sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, utc_now)
assert sunset == sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, utc_now)
def test_is_up(hass):
"""Test retrieving next sun events."""
utc_now = datetime(2016, 11, 1, 12, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert not sun.is_up(hass)
utc_now = datetime(2016, 11, 1, 18, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
assert sun.is_up(hass)
def test_norway_in_june(hass):
"""Test location in Norway where the sun doesn't set in summer."""
hass.config.latitude = 69.6
hass.config.longitude = 18.8
june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, datetime(2017, 7, 25)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, datetime(2017, 7, 25)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, datetime(2017, 7, 26)))
print(sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, datetime(2017, 7, 26)))
assert sun.get_astral_event_next(hass, SUN_EVENT_SUNRISE, june) == datetime(
2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC
)
assert sun.get_astral_event_next(hass, SUN_EVENT_SUNSET, june) == datetime(
2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC
)
assert sun.get_astral_event_date(hass, SUN_EVENT_SUNRISE, june) is None
assert sun.get_astral_event_date(hass, SUN_EVENT_SUNSET, june) is None
avg_line_length: 37.695876 | max_line_length: 87 | alphanum_fraction: 0.695064

hexsha: b8ba04efa5fe331b32b0cf1d3ce220c04594f5e7 | size: 969 | ext: py | lang: Python
path: court_scraper/platforms/oscn/runner.py | repo: gitter-badger/court-scraper @ f89b86d07d39d99c8c123717227fbc773d4933c3 | licenses: ["0BSD"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
import logging
from court_scraper.base.runner import BaseRunner
from .site import Site
logger = logging.getLogger(__name__)
class Runner(BaseRunner):
"""
Facade class to simplify invocation and usage of scrapers.
Arguments:
- cache_dir -- Path to cache directory for scraped file artifacts (default: {})
- config_path -- Path to location of config file
- place_id -- Scraper ID made up of state and county (e.g. ga_dekalb)
"""
    def search(self, case_numbers=[], **kwargs):
"""
For a given scraper, executes the search, acquisition
and processing of case info.
Keyword arguments:
- case_numbers - List of case numbers
Returns: List of dicts containing case metadata
"""
site = Site(self.place_id)
logger.info(
"Executing search for {}".format(self.place_id)
)
data = site.search(case_numbers=case_numbers)
return data
avg_line_length: 24.846154 | max_line_length: 83 | alphanum_fraction: 0.646027

hexsha: 0d69c1381744f6485dfdefdd8339729af7f8e2ff | size: 272 | ext: py | lang: Python
path: setup.py | repo: stevec7/gpfs @ 9d92f2a77bc1df2d4340db607596f0b057d6203e | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: 2 (2016-05-27T18:53:14.000Z to 2020-07-18T16:49:45.000Z) | issues: null | forks: 1 (2020-07-18T16:49:49.000Z to 2020-07-18T16:49:49.000Z)
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
name = 'gpfs',
version = '0.0.1',
description = 'libraries and tools for GPFS',
author = 'stevec7',
author_email = 'none',
packages = ['gpfs']
)
avg_line_length: 12.363636 | max_line_length: 49 | alphanum_fraction: 0.591912

hexsha: b7b0421ddcfce8c15e92e9a9a8297236c6eb74a9 | size: 1,984 | ext: py | lang: Python
path: tests/tests/correctness/EPLAnalytics/Detectors/Corridor/Corridor_cor_006/run.py | repo: rpeach-sag/apama-industry-analytics-kit @ a3f6039915501d41251b6f7ec41b0cb8111baf7b | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: 3 (2019-09-02T18:21:22.000Z to 2020-04-17T16:34:57.000Z) | issues: null | forks: null
# $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest(inputLog="input.log")
self.injectAnalytic(correlator)
self.injectCorridor(correlator)
self.ready(correlator)
correlator.receive(filename='OutputRising.evt', channels=['OutputRising'])
correlator.send('Config.evt')
self.waitForSignal('correlator.out',
expr='Analytic Corridor started for inputDataNames',
condition='==1',
timeout=5)
correlator.send('Events.evt')
self.waitForSignal('input.log', expr='"Input1",com.industry.analytics\.Data', condition='==9', timeout=5)
def validate(self):
self.assertGrep('OutputRising.evt', expr='com.industry.analytics.Data.*"OutputRising","a","s1",0,5,"",1,2,3,{"anomalySource":"Corridor","duration":"0","zone":"inside"}')
self.assertGrep('OutputRising.evt', expr='com.industry.analytics.Data.*"OutputRising","a","s1",20,7,"",1,2,3,{"anomalySource":"Corridor","duration":"0","zone":"inside"}')
self.assertGrep('OutputRising.evt', expr='com.industry.analytics.Data.*"OutputRising","a","s1",25,7,"",1,2,3,{"anomalySource":"Corridor","duration":"0","zone":"inside"}')
self.assertGrep('OutputRising.evt', expr='com.industry.analytics.Data.*"OutputRising","a","s1",30,7,"",1,2,3,{"anomalySource":"Corridor","duration":"0","zone":"inside"}')
self.checkSanity()
avg_line_length: 64 | max_line_length: 343 | alphanum_fraction: 0.71119

hexsha: 03a2c2a4393cbe57da16597de84cee5a7d80b9c8 | size: 1,869 | ext: py | lang: Python
path: ooobuild/lo/document/x_document_event_listener.py | repo: Amourspirit/ooo_uno_tmpl @ 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.document
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from .document_event import DocumentEvent as DocumentEvent_f21b0da8
class XDocumentEventListener(XEventListener_c7230c4a):
"""
allows to be notified of events happening in an OfficeDocument
This interface is the successor of the XEventListener interface, which should not be used anymore.
**since**
OOo 3.1
See Also:
`API XDocumentEventListener <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1document_1_1XDocumentEventListener.html>`_
"""
__ooo_ns__: str = 'com.sun.star.document'
__ooo_full_ns__: str = 'com.sun.star.document.XDocumentEventListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.document.XDocumentEventListener'
@abstractmethod
def documentEventOccured(self, Event: 'DocumentEvent_f21b0da8') -> None:
"""
is called whenever a document event occurred
"""
__all__ = ['XDocumentEventListener']
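For illustration only, a minimal concrete listener built on the abstract method declared above; the class name is hypothetical, and other methods inherited from XEventListener (such as disposing) would also need implementations:

class PrintingDocumentEventListener(XDocumentEventListener):
    """Hypothetical listener that simply logs incoming document events."""

    def documentEventOccured(self, Event: 'DocumentEvent_f21b0da8') -> None:
        print("document event occurred:", Event)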
avg_line_length: 35.264151 | max_line_length: 151 | alphanum_fraction: 0.747994

hexsha: 0e045fb594eb16496d76adfb20ac5d3444194e4b | size: 288 | ext: py | lang: Python
path: Dataset/Leetcode/train/12/583.py | repo: kkcookies99/UAST @ fff81885aa07901786141a71e5600a08d7cb4868 | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
class Solution:
    def XXX(self, num: int) -> str:
        # Greedy integer-to-Roman conversion: walk the value map from the
        # largest symbol down and append it while the remainder still covers it.
        d = {1000: 'M', 900: 'CM', 500: 'D', 400: 'CD', 100: 'C', 90: 'XC',
             50: 'L', 40: 'XL', 10: 'X', 9: 'IX', 5: 'V', 4: 'IV', 1: 'I'}
        a = ''
        for i in d:
            while num >= i:
                a += d[i]
                num -= i
        return a
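A quick check of the greedy conversion above (XXX is the dataset's anonymized method name; the expected numerals are the standard Roman forms):

s = Solution()
assert s.XXX(3) == "III"
assert s.XXX(58) == "LVIII"      # 50 + 5 + 3
assert s.XXX(1994) == "MCMXCIV"  # 1000 + 900 + 90 + 4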
avg_line_length: 26.181818 | max_line_length: 110 | alphanum_fraction: 0.385417

hexsha: 39106f87bd27a11f4517bd828e5bc27a2b18fea9 | size: 4,670 | ext: py | lang: Python
path: qiskit_nature/transformers/second_quantization/freeze_core_transformer.py | repo: Durd3nT/qiskit-nature @ 83d9d7caf409f5519276413f754b5c8fc901d3c0 | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: null | issues: 1 (2021-11-11T06:33:41.000Z to 2021-11-11T06:33:41.000Z) | forks: 1 (2021-10-05T20:58:04.000Z to 2021-10-05T20:58:04.000Z)
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Freeze-Core Reduction interface."""
from typing import List, Optional
from qiskit_nature.drivers.second_quantization import QMolecule
from .active_space_transformer import ActiveSpaceTransformer
class FreezeCoreTransformer(ActiveSpaceTransformer):
"""The Freeze-Core reduction."""
def __init__(
self,
freeze_core: bool = True,
remove_orbitals: Optional[List[int]] = None,
) -> None:
"""Initializes a transformer which reduces a `QMolecule` by removing some molecular
orbitals.
The orbitals to be removed are specified in two ways:
1. When `freeze_core` is enabled (the default), the `core_orbitals` listed in the
`QMolecule` are made inactive and removed in the same fashion as in the
:class:`ActiveSpaceTransformer`.
2. Additionally, unoccupied molecular orbitals can be removed via a list of indices
passed to `remove_orbitals`. It is the user's responsibility to ensure that these are
indeed unoccupied orbitals, as no checks are performed.
If you want to remove additional occupied orbitals, please use the
:class:`ActiveSpaceTransformer` instead.
Args:
freeze_core: A boolean indicating whether to remove the molecular orbitals specified by
`QMolecule.core_orbitals`.
remove_orbitals: A list of indices specifying molecular orbitals which are removed.
No checks are performed on the nature of these orbitals, so the user
must make sure that these are _unoccupied_ orbitals, which can be
removed without taking any energy shifts into account.
"""
self._freeze_core = freeze_core
self._remove_orbitals = remove_orbitals
super().__init__()
def transform(self, molecule_data: QMolecule) -> QMolecule:
"""Reduces the given `QMolecule` by removing the core and optionally defined unoccupied
molecular orbitals.
Args:
molecule_data: the `QMolecule` to be transformed.
Returns:
A new `QMolecule` instance.
Raises:
QiskitNatureError: If more electrons or orbitals are requested than are available, if an
uneven number of inactive electrons remains, or if the number of
selected active orbital indices does not match
`num_molecular_orbitals`.
"""
molecule_data_new = super().transform(molecule_data)
def rename_dict_key(energy_shift_dict):
try:
energy_shift_dict["FreezeCoreTransformer"] = energy_shift_dict.pop(
"ActiveSpaceTransformer"
)
except KeyError:
pass
rename_dict_key(molecule_data_new.energy_shift)
rename_dict_key(molecule_data_new.x_dip_energy_shift)
rename_dict_key(molecule_data_new.y_dip_energy_shift)
rename_dict_key(molecule_data_new.z_dip_energy_shift)
return molecule_data_new
def _check_configuration(self):
pass
def _determine_active_space(self, molecule_data: QMolecule):
nelec_total = molecule_data.num_alpha + molecule_data.num_beta
inactive_orbs_idxs = molecule_data.core_orbitals
if self._remove_orbitals is not None:
inactive_orbs_idxs.extend(self._remove_orbitals)
active_orbs_idxs = [
o for o in range(molecule_data.num_molecular_orbitals) if o not in inactive_orbs_idxs
]
self._active_orbitals = active_orbs_idxs
self._num_molecular_orbitals = len(active_orbs_idxs)
# compute number of active electrons
nelec_inactive = int(sum([self._mo_occ_total[o] for o in inactive_orbs_idxs]))
nelec_active = nelec_total - nelec_inactive
num_alpha = (nelec_active - (molecule_data.multiplicity - 1)) // 2
num_beta = nelec_active - num_alpha
self._num_particles = (num_alpha, num_beta)
return (active_orbs_idxs, inactive_orbs_idxs)
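Going only by the constructor and transform signatures above, a hedged usage sketch; obtaining q_molecule from a qiskit_nature driver is outside this file and is assumed here:

# q_molecule is assumed to be a QMolecule produced by a qiskit_nature second-quantization driver.
transformer = FreezeCoreTransformer(freeze_core=True, remove_orbitals=[5, 6])
reduced_molecule = transformer.transform(q_molecule)  # returns a new, reduced QMolecule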
avg_line_length: 40.608696 | max_line_length: 100 | alphanum_fraction: 0.668951

hexsha: 42dfb17b4a8aed3451befa8061e3090662b1ff4f | size: 4,122 | ext: py | lang: Python
path: backend/src/features/authentication/services.py | repo: ihsaro/socioworld @ d8054afe152bd841eb58d6870e781917a0f6a511 | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
from datetime import datetime, timedelta
from typing import Union
from jose import jwt
from passlib.context import CryptContext
from sqlalchemy.orm.session import Session
from configurations.constants.security import (
ACCESS_TOKEN_EXPIRE_MINUTES,
ALGORITHM,
SECRET_KEY
)
from configurations.messages.error.service.authentication import AuthenticationServiceErrorMessages
from configurations.messages.success.service.authentication import AuthenticationServiceSuccessMessages
from configurations.types import Error, Success
from features.authentication.entities import (
ApplicationUser,
BlacklistedToken,
Roles
)
from features.authentication.mappers import (
map_application_user_to_registered_user,
map_token_to_blacklist_token,
map_user_registration_details_to_application_user
)
from features.authentication.models import (
LoginCredentials,
TokenCreated,
UserRegistrationDetails,
RegisteredUser
)
from features.authentication import repositories as authentication_repositories
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def __generate_jwt_token__(*, to_encode: dict):
to_encode_copy = to_encode.copy()
expires_in = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode_copy.update({"exp": expires_in})
return jwt.encode(to_encode_copy, SECRET_KEY, algorithm=ALGORITHM)
def __get_hashed_password__(*, password: str) -> str:
return pwd_context.hash(password)
def __verify_password__(plain_password: str, hashed_password: str) -> bool:
return pwd_context.verify(plain_password, hashed_password)
def login(*, database: Session, login_credentials: LoginCredentials) -> Union[TokenCreated, Error]:
application_user = authentication_repositories.get_application_user_for_login(
database=database,
username=login_credentials.username
)
if application_user is None:
return Error(
code=AuthenticationServiceErrorMessages.INVALID_CREDENTIALS.name,
message=AuthenticationServiceErrorMessages.INVALID_CREDENTIALS.value
)
else:
if __verify_password__(
plain_password=login_credentials.password,
hashed_password=application_user.password
):
access_token = __generate_jwt_token__(to_encode={"sub": str(application_user.id)})
return TokenCreated(access_token=access_token, token_type="bearer")
else:
return Error(
code=AuthenticationServiceErrorMessages.INVALID_CREDENTIALS.name,
message=AuthenticationServiceErrorMessages.INVALID_CREDENTIALS.value
)
def register(*, user_registration_details: UserRegistrationDetails, role: Roles, database: Session) -> Union[
RegisteredUser,
Error
]:
application_user_to_be_created = map_user_registration_details_to_application_user(
user_registration_details=user_registration_details
)
application_user_to_be_created.password = __get_hashed_password__(password=application_user_to_be_created.password)
application_user_to_be_created.role = role
application_user = authentication_repositories.create_application_user(
database=database,
application_user=application_user_to_be_created
)
if isinstance(application_user, ApplicationUser):
return map_application_user_to_registered_user(application_user=application_user)
elif isinstance(application_user, Error):
return application_user
def blacklist_token(*, token: str, database: Session) -> Union[
Success,
Error
]:
blacklisted_token = authentication_repositories.add_token_to_blacklist(
database=database,
token=map_token_to_blacklist_token(token=token)
)
if isinstance(blacklisted_token, BlacklistedToken):
return Success(
code=AuthenticationServiceSuccessMessages.TOKEN_BLACKLISTED.name,
message=AuthenticationServiceSuccessMessages.TOKEN_BLACKLISTED.value
)
elif isinstance(blacklisted_token, Error):
return blacklisted_token
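The helpers above follow the usual passlib plus python-jose pattern; a self-contained sketch of that pattern with throwaway constants (not the project's real configuration values):

from datetime import datetime, timedelta
from jose import jwt
from passlib.context import CryptContext

SECRET_KEY = "change-me"   # placeholder, not the project's setting
ALGORITHM = "HS256"        # placeholder, not the project's setting
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

# Hash and verify a password, then issue and decode a short-lived token.
hashed = pwd_context.hash("s3cret")
assert pwd_context.verify("s3cret", hashed)

claims = {"sub": "42", "exp": datetime.utcnow() + timedelta(minutes=30)}
token = jwt.encode(claims, SECRET_KEY, algorithm=ALGORITHM)
assert jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])["sub"] == "42"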
avg_line_length: 35.230769 | max_line_length: 119 | alphanum_fraction: 0.767831

hexsha: e12bab2e490732f0cde9c2ad10abf7b32f5d5a58 | size: 9,257 | ext: py | lang: Python | path: src/pandas_profiling/model/alerts.py
stars: 736 (2016-01-14T03:36:03.000Z to 2018-01-06T00:56:33.000Z) | repo: Anselmoo/pandas-profiling @ 41ee043175eaa1c5b21fcba178110331adcad713 | licenses: ["MIT"]
issues: 72 (2016-01-29T12:08:04.000Z to 2018-01-06T11:18:44.000Z) | repo: Anselmoo/pandas-profiling @ 41ee043175eaa1c5b21fcba178110331adcad713 | licenses: ["MIT"]
forks: 108 (2016-01-14T11:48:18.000Z to 2018-01-02T13:35:10.000Z) | repo: sthagen/pandas-profiling-pandas-profiling @ 6fd50055126ebebf74c92c6f908f54fa7cd9c816 | licenses: ["MIT"]
"""Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant
values, high correlations)."""
from enum import Enum, auto, unique
from typing import Any, Dict, List, Optional, Set
import numpy as np
import pandas as pd
from pandas_profiling.config import Settings
from pandas_profiling.model.correlations import perform_check_correlation
@unique
class AlertType(Enum):
"""Alert types"""
CONSTANT = auto()
"""This variable has a constant value."""
ZEROS = auto()
"""This variable contains zeros."""
HIGH_CORRELATION = auto()
"""This variable is highly correlated."""
HIGH_CARDINALITY = auto()
"""This variable has a high cardinality."""
UNSUPPORTED = auto()
"""This variable is unsupported."""
DUPLICATES = auto()
"""This variable contains duplicates."""
SKEWED = auto()
"""This variable is highly skewed."""
MISSING = auto()
"""This variable contains missing values."""
INFINITE = auto()
"""This variable contains infinite values."""
TYPE_DATE = auto()
"""This variable is likely a datetime, but treated as categorical."""
UNIQUE = auto()
"""This variable has unique values."""
CONSTANT_LENGTH = auto()
"""This variable has a constant length"""
REJECTED = auto()
"""Variables are rejected if we do not want to consider them for further analysis."""
UNIFORM = auto()
"""The variable is uniformly distributed"""
EMPTY = auto()
"""The DataFrame is empty"""
class Alert:
"""An alert object (type, values, column)."""
_anchor_id: Optional[str] = None
def __init__(
self,
alert_type: AlertType,
values: Optional[Dict] = None,
column_name: Optional[str] = None,
fields: Optional[Set] = None,
):
if values is None:
values = {}
if fields is None:
fields = set()
self.fields = fields
self.alert_type = alert_type
self.values = values
self.column_name = column_name
@property
def anchor_id(self) -> Optional[str]:
if self._anchor_id is None:
self._anchor_id = str(hash(self.column_name))
return self._anchor_id
def fmt(self) -> str:
# TODO: render in template
name = self.alert_type.name.replace("_", " ")
if name == "HIGH CORRELATION":
num = len(self.values["fields"])
title = ", ".join(self.values["fields"])
name = f'<abbr title="This variable has a high correlation with {num} fields: {title}">HIGH CORRELATION</abbr>'
return name
def __repr__(self):
alert_type = self.alert_type.name
column = self.column_name
return f"[{alert_type}] alert on column {column}"
def check_table_alerts(table: dict) -> List[Alert]:
"""Checks the overall dataset for alerts.
Args:
table: Overall dataset statistics.
Returns:
A list of alerts.
"""
alerts = []
if alert_value(table.get("n_duplicates", np.nan)):
alerts.append(
Alert(
alert_type=AlertType.DUPLICATES,
values=table,
fields={"n_duplicates"},
)
)
if table["n"] == 0:
alerts.append(
Alert(
alert_type=AlertType.EMPTY,
values=table,
fields={"n"},
)
)
return alerts
def numeric_alerts(config: Settings, summary: dict) -> List[Alert]:
alerts = []
# Skewness
if skewness_alert(summary["skewness"], config.vars.num.skewness_threshold):
alerts.append(
Alert(
alert_type=AlertType.SKEWED,
fields={"skewness"},
)
)
# Infinite values
if alert_value(summary["p_infinite"]):
alerts.append(
Alert(
alert_type=AlertType.INFINITE,
fields={"p_infinite", "n_infinite"},
)
)
# Zeros
if alert_value(summary["p_zeros"]):
alerts.append(
Alert(
alert_type=AlertType.ZEROS,
fields={"n_zeros", "p_zeros"},
)
)
if (
"chi_squared" in summary
and summary["chi_squared"]["pvalue"] > config.vars.num.chi_squared_threshold
):
alerts.append(Alert(alert_type=AlertType.UNIFORM))
return alerts
def categorical_alerts(config: Settings, summary: dict) -> List[Alert]:
alerts = []
# High cardinality
if summary.get("n_distinct", np.nan) > config.vars.cat.cardinality_threshold:
alerts.append(
Alert(
alert_type=AlertType.HIGH_CARDINALITY,
fields={"n_distinct"},
)
)
if (
"chi_squared" in summary
and summary["chi_squared"]["pvalue"] > config.vars.cat.chi_squared_threshold
):
alerts.append(Alert(alert_type=AlertType.UNIFORM))
if summary.get("date_warning"):
alerts.append(Alert(alert_type=AlertType.TYPE_DATE))
# Constant length
if "composition" in summary and summary["min_length"] == summary["max_length"]:
alerts.append(
Alert(
alert_type=AlertType.CONSTANT_LENGTH,
fields={"composition_min_length", "composition_max_length"},
)
)
return alerts
def generic_alerts(summary: dict) -> List[Alert]:
alerts = []
# Missing
if alert_value(summary["p_missing"]):
alerts.append(
Alert(
alert_type=AlertType.MISSING,
fields={"p_missing", "n_missing"},
)
)
return alerts
def supported_alerts(summary: dict) -> List[Alert]:
alerts = []
if summary.get("n_distinct", np.nan) == summary["n"]:
alerts.append(
Alert(
alert_type=AlertType.UNIQUE,
fields={"n_distinct", "p_distinct", "n_unique", "p_unique"},
)
)
if summary.get("n_distinct", np.nan) == 1:
summary["mode"] = summary["value_counts_without_nan"].index[0]
alerts.append(
Alert(
alert_type=AlertType.CONSTANT,
fields={"n_distinct"},
)
)
alerts.append(
Alert(
alert_type=AlertType.REJECTED,
fields=set(),
)
)
return alerts
def unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:
alerts = [
Alert(
alert_type=AlertType.UNSUPPORTED,
fields=set(),
),
Alert(
alert_type=AlertType.REJECTED,
fields=set(),
),
]
return alerts
def check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:
"""Checks individual variables for alerts.
Args:
col: The column name that is checked.
description: The series description.
Returns:
A list of alerts.
"""
alerts = []
alerts += generic_alerts(description)
if description["type"] == "Unsupported":
alerts += unsupported_alerts(description)
else:
alerts += supported_alerts(description)
if description["type"] == "Categorical":
alerts += categorical_alerts(config, description)
if description["type"] == "Numeric":
alerts += numeric_alerts(config, description)
for idx in range(len(alerts)):
alerts[idx].column_name = col
alerts[idx].values = description
return alerts
def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:
alerts = []
for corr, matrix in correlations.items():
if config.correlations[corr].warn_high_correlations:
threshold = config.correlations[corr].threshold
correlated_mapping = perform_check_correlation(matrix, threshold)
if len(correlated_mapping) > 0:
for k, v in correlated_mapping.items():
alerts.append(
Alert(
column_name=k,
alert_type=AlertType.HIGH_CORRELATION,
values={"corr": corr, "fields": v},
)
)
return alerts
def get_alerts(
config: Settings, table_stats: dict, series_description: dict, correlations: dict
) -> List[Alert]:
alerts = check_table_alerts(table_stats)
for col, description in series_description.items():
alerts += check_variable_alerts(config, col, description)
alerts += check_correlation_alerts(config, correlations)
alerts.sort(key=lambda alert: str(alert.alert_type))
return alerts
def alert_value(value: float) -> bool:
return not np.isnan(value) and value > 0.01
def skewness_alert(v: float, threshold: int) -> bool:
return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)
def type_date_alert(series: pd.Series) -> bool:
from dateutil.parser import ParserError, parse
try:
series.apply(parse)
except ParserError:
return False
else:
return True
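The two threshold helpers near the end drive most numeric alerts; a quick worked check of their behaviour as defined above:

import numpy as np

assert alert_value(0.05)           # above the 1% cutoff, so it alerts
assert not alert_value(0.001)      # below the 1% cutoff
assert not alert_value(np.nan)     # NaN never alerts
assert skewness_alert(2.5, 2)      # |skewness| beyond the threshold
assert not skewness_alert(1.0, 2)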
avg_line_length: 27.226471 | max_line_length: 123 | alphanum_fraction: 0.583558

hexsha: b8213bca651264e962313f5bfff353d674b722e4 | size: 346 | ext: py | lang: Python
path: ex017.py | repo: felipenunes07/Python-Exercicios--Curso-em-video @ 97087c2fcd6dda48894954623b8b83100af3f47f | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
## Write a program that reads the lengths of the opposite and adjacent legs of a right triangle. Calculate and display the length of the hypotenuse. ##
import math
a = float(input('Comprimento do cateto oposto: '))
b = float(input('Comprimento do cateto adjacente: '))
print('A hipotenusa vai medir {}.'.format(math.sqrt((a**2)+(b**2))))
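For legs of 3 and 4 the program prints a hypotenuse of 5.0; math.hypot computes the same value directly:

import math

assert math.sqrt(3**2 + 4**2) == 5.0
assert math.hypot(3, 4) == 5.0   # equivalent built-in helper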
avg_line_length: 43.25 | max_line_length: 157 | alphanum_fraction: 0.728324

hexsha: 3ae12cebe3cf2b6d5b1e436637b73fb2fb8abe7a | size: 1,495 | ext: py | lang: Python
path: laia/utils/convert_old_model_checkpoint.py | repo: basbeu/PyLaia @ d14458484b56622204b1730a7d53220c5d0f1bc1 | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: 2 (2020-09-10T13:31:17.000Z to 2021-07-31T09:44:17.000Z) | issues: 1 (2020-12-06T18:11:52.000Z to 2020-12-06T18:19:38.000Z) | forks: 2 (2020-04-20T13:40:56.000Z to 2020-10-17T11:59:55.000Z)
from __future__ import absolute_import
import argparse
import os
import re
from collections import OrderedDict
import torch
def convert_old_parameters(params):
"""Convert parameters from the old model to the new one."""
# type: OrderedDict -> OrderedDict
new_params = []
for k, v in params.items():
m = re.match(r"^conv_block([0-9]+)\.([a-z_.]+)$", k)
if m:
if m.group(2) == "poolsize":
pass
else:
new_params.append(("conv.{}.{}".format(m.group(1), m.group(2)), v))
elif k[0] == "_":
pass
else:
new_params.append((k, v))
return OrderedDict(new_params)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_checkpoint", help="File path of the input checkpoint")
parser.add_argument("output_checkpoint", help="File path of the output checkpoint")
args = parser.parse_args()
# Check input checkpoint
assert os.path.isfile(args.input_checkpoint), "{!r} is not a file".format(
args.input_checkpoint
)
# Prepare directory for the output checkpoint
outdir = os.path.dirname(args.output_checkpoint)
if os.path.exists(outdir):
assert os.path.isdir(outdir), "{!r} is not a directory".format(outdir)
else:
os.makedirs(outdir)
params = torch.load(args.input_checkpoint)
params = convert_old_parameters(params)
torch.save(params, args.output_checkpoint)
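The key renaming can be exercised without a real checkpoint; a small worked example with placeholder values standing in for tensors:

from collections import OrderedDict

old = OrderedDict([
    ("conv_block0.conv.weight", "w0"),  # renamed to conv.0.conv.weight
    ("conv_block0.poolsize", (2, 2)),   # poolsize entries are dropped
    ("_private_buffer", "x"),           # keys starting with "_" are dropped
    ("linear.weight", "w1"),            # everything else is kept as-is
])
new = convert_old_parameters(old)
assert list(new.keys()) == ["conv.0.conv.weight", "linear.weight"]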
avg_line_length: 31.145833 | max_line_length: 87 | alphanum_fraction: 0.641472

hexsha: 90522cd8ef6460753ff4e627ee208d071fdcb1ec | size: 432 | ext: py | lang: Python
path: aula3/teste2aula3.py | repo: otaviobizulli/python-exercices @ 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
placa = int(input('Insira apenas a parte numerica da placa: '))
if placa > 9999 or placa < 1000:
print('Placa Inválida.')
else:
pn = placa % 10
if pn == 1 or pn == 2:
print('Segunda-feira')
elif pn == 3 or pn == 4:
print ('Terça-feira.')
elif pn == 5 or pn == 6:
print ('Quarta-feira.')
elif pn == 7 or pn == 8:
print('Quinta-feira.')
else:
print('Sexta-feira.')
avg_line_length: 25.411765 | max_line_length: 63 | alphanum_fraction: 0.527778

hexsha: 50988929c3b393861ab787ab42893eb7720d3d9c | size: 356 | ext: py | lang: Python
path: app/actions/server.py | repo: Zhinon/fuego-de-quasar @ ccb007f17215a37d34a935d7a06d49f13aed25da | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
from app.actions import (
topsecret,
topsecret_split,
)
ACTIONS_MAP = {
# Topsecret
'post_get_coords_and_message': topsecret.GetCoordsAndMessageAction,
# Topsecret_split
'get_coords_and_message_split': topsecret_split.GetCoordsAndMessageSplit,
'post_message_and_distance_split': topsecret_split.PostMessageAndDistanceSplit,
}
avg_line_length: 25.428571 | max_line_length: 83 | alphanum_fraction: 0.786517

hexsha: 097ac0da6408df0e8d1779126307635d40be9b8f | size: 2,697 | ext: py | lang: Python
path: smd/gametest/pruning.py | repo: SMltd/SMCrypto @ d07723a2ee6183577bb8449eaa4b2110ac653c3f | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
#!/usr/bin/env python
# Copyright (C) 2018 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from mover import MoverTest
"""
Tests basic operation with pruning enabled.
"""
# Regexp for the log that is printed when a pruned block would be needed.
FAILED_GETTING_UNDO = 'Failed to retrieve undo data'
class PruningTest (MoverTest):
def run (self):
self.generate (101)
self.expectGameState ({"players": {}})
# Test that basic forward-processing works even with no kept blocks.
self.setPruning (0)
self.move ("a", "k", 2)
self.move ("b", "y", 1)
self.generate (1)
self.expectGameState ({"players": {
"a": {"x": 0, "y": 1, "dir": "up", "steps": 1},
"b": {"x": -1, "y": 1},
}})
self.move ("a", "l", 2)
self.generate (1)
self.expectGameState ({"players": {
"a": {"x": 1, "y": 1, "dir": "right", "steps": 1},
"b": {"x": -1, "y": 1},
}})
self.stopGameDaemon ()
assert not self.gamenode.logMatches (FAILED_GETTING_UNDO)
# Enable pruning while keeping at least one block, so that we can reorg.
self.setPruning (1)
txid = self.move ("a", "j", 1)
self.generate (1)
self.expectGameState ({"players": {
"a": {"x": 1, "y": 0},
"b": {"x": -1, "y": 1},
}})
blk = self.rpc.xaya.getbestblockhash ()
self.rpc.xaya.invalidateblock (blk)
# The previous move of "a" should have been put back into the mempool.
assert self.rpc.xaya.getrawmempool () == [txid]
self.move ("b", "n", 1)
self.generate (1)
self.expectGameState ({"players": {
"a": {"x": 1, "y": 0},
"b": {"x": 0, "y": 0},
}})
self.stopGameDaemon ()
assert not self.gamenode.logMatches (FAILED_GETTING_UNDO)
# Produce a longer reorg, which leads to a resync from scratch. This still
# "works" (due to the resync), but prints a log message that we can look for
# to ensure things work as expected.
self.setPruning (1)
blk = self.rpc.xaya.getbestblockhash ()
self.generate (1)
self.expectGameState ({"players": {
"a": {"x": 1, "y": 0},
"b": {"x": 0, "y": 0},
}})
self.rpc.xaya.invalidateblock (blk)
self.stopGameDaemon ()
assert self.gamenode.logMatches (FAILED_GETTING_UNDO)
def setPruning (self, value):
"""
Restarts the game daemon to change the pruning setting to the given value.
"""
self.log.info ("Setting pruning to %d" % value)
self.stopGameDaemon ()
self.startGameDaemon (extraArgs=["--enable_pruning=%d" % value])
if __name__ == "__main__":
PruningTest ().main ()
avg_line_length: 28.389474 | max_line_length: 80 | alphanum_fraction: 0.607712

hexsha: 42d276620531a23ab882c3016800fe5e26713419 | size: 151 | ext: py | lang: Python
path: vien_nchl/web/apps.py | repo: thapgan/igh @ 0c0e69f7925e31812fec2c4616d8304a27abb518 | licenses: ["MIT"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: null
from django.apps import AppConfig
class WebConfig(AppConfig):
name = 'vien_nchl.web'
verbose_name = "Web"
def ready(self):
pass
avg_line_length: 15.1 | max_line_length: 33 | alphanum_fraction: 0.655629

hexsha: 12876ce3ae75aec449eb82ed67a8b9e7dc4aaa8f | size: 16,142 | ext: py | lang: Python
path: docs/tests.py | repo: harunpehlivan/djangoproject.com @ a74c2062316c9c571d8e8f978b21b0748a908496 | licenses: ["BSD-3-Clause"] (shared by the stars, issues, and forks columns)
stars: null | issues: null | forks: 1 (2020-07-03T12:43:30.000Z to 2020-07-03T12:43:30.000Z)
import datetime
import os
from http import HTTPStatus
from operator import attrgetter
from pathlib import Path
from django.conf import settings
from django.contrib.sites.models import Site
from django.template import Context, Template
from django.test import TestCase
from django.urls import reverse, set_urlconf
from djangoproject.urls import www as www_urls
from releases.models import Release
from .models import Document, DocumentRelease
from .search import DOCUMENT_SEARCH_VECTOR
from .sitemaps import DocsSitemap
from .utils import get_doc_path
class ModelsTests(TestCase):
def test_dev_is_supported(self):
"""
Document for a release without a date ("dev") is supported.
"""
d = DocumentRelease.objects.create()
self.assertTrue(d.is_supported)
self.assertTrue(d.is_dev)
def test_current_is_supported(self):
"""
Document with a release without an EOL date is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_previous_is_supported(self):
"""
Document with a release with an EOL date in the future is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day,
eol_date=today + 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_old_is_unsupported(self):
"""
        Document with a release with an EOL date in the past is unsupported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day,
eol_date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertFalse(d.is_supported)
self.assertFalse(d.is_dev)
def test_most_recent_micro_release_considered(self):
"""
Dates are looked up on the latest micro release in a given series.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day)
d = DocumentRelease.objects.create(release=r)
r2 = Release.objects.create(version='1.8.1',
date=today - 5 * day)
# The EOL date of the first release is set automatically.
r.refresh_from_db()
self.assertEqual(r.eol_date, r2.date)
# Since 1.8.1 is still supported, docs show up as supported.
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
class ManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
r1 = Release.objects.create(version='1.0')
r2 = Release.objects.create(version='2.0')
DocumentRelease.objects.bulk_create(
DocumentRelease(lang=lang, release=release)
for lang, release in [('en', r1), ('en', r2), ('sv', r1), ('ar', r1)]
)
def test_by_version(self):
doc_releases = DocumentRelease.objects.by_version('1.0')
self.assertEqual(
{(r.lang, r.release.version) for r in doc_releases},
{('en', '1.0'), ('sv', '1.0'), ('ar', '1.0')}
)
def test_get_by_version_and_lang_exists(self):
doc = DocumentRelease.objects.get_by_version_and_lang('1.0', 'en')
self.assertEqual(doc.release.version, '1.0')
self.assertEqual(doc.lang, 'en')
def test_get_by_version_and_lang_missing(self):
with self.assertRaises(DocumentRelease.DoesNotExist):
DocumentRelease.objects.get_by_version_and_lang('2.0', 'sv')
def test_get_available_languages_by_version(self):
get = DocumentRelease.objects.get_available_languages_by_version
self.assertEqual(list(get('1.0')), ['ar', 'en', 'sv'])
self.assertEqual(list(get('2.0')), ['en'])
self.assertEqual(list(get('3.0')), [])
class RedirectsTests(TestCase):
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_team_url(self):
# This URL is linked from the docs.
self.assertEqual('/foundation/teams/', reverse('members:teams', urlconf=www_urls))
def test_internals_team(self):
response = self.client.get(
'/en/dev/internals/team/',
HTTP_HOST='docs.djangoproject.dev:8000',
)
self.assertRedirects(
response,
'https://www.djangoproject.com/foundation/teams/',
status_code=HTTPStatus.MOVED_PERMANENTLY,
fetch_redirect_response=False,
)
class SearchFormTestCase(TestCase):
fixtures = ['doc_test_fixtures']
def setUp(self):
# We need to create an extra Site because docs have SITE_ID=2
Site.objects.create(name='Django test', domain="example2.com")
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_empty_get(self):
response = self.client.get('/en/dev/search/',
HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 200)
class TemplateTagTests(TestCase):
def test_pygments_template_tag(self):
template = Template('''
{% load docs %}
{% pygment 'python' %}
def band_listing(request):
"""A view of all bands."""
bands = models.Band.objects.all()
return render(request, 'bands/band_listing.html', {'bands': bands})
{% endpygment %}
''')
self.assertHTMLEqual(
template.render(Context()),
"""
<div class="highlight">
<pre>
<span></span>
<span class="k">def</span><span class="nf">band_listing</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">):</span>
<span class="sd">"""A view of all bands."""</span>
<span class="n">bands</span> <span class="o">=</span>
<span class="n">models</span><span class="o">.</span>
<span class="n">Band</span><span class="o">.</span>
<span class="n">objects</span><span class="o">.</span>
<span class="n">all</span><span class="p">()</span>
<span class="k">return</span> <span class="n">render</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">,</span>
<span class="s1">'bands/band_listing.html'</span>
<span class="p">,</span> <span class="p">{</span>
<span class="s1">'bands'</span><span class="p">:</span>
<span class="n">bands</span><span class="p">})</span>
</pre>
</div>
"""
)
class TestUtils(TestCase):
def test_get_doc_path(self):
# non-existent file
self.assertEqual(get_doc_path(Path('root'), 'subpath.txt'), None)
# existing file
path, filename = __file__.rsplit(os.path.sep, 1)
self.assertEqual(get_doc_path(Path(path), filename), None)
class UpdateDocTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.release = DocumentRelease.objects.create()
def test_sync_to_db(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_clean_path(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar/index',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_title_strip_tags(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the <strong>title</strong>',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['This is the title'], transform=attrgetter('title'))
def test_title_entities(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'Title & title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['Title & title'], transform=attrgetter('title'))
def test_empty_documents(self):
self.release.sync_to_db([
{'title': 'Empty body document', 'current_page_name': 'foo/1'},
{'body': 'Empty title document', 'current_page_name': 'foo/2'},
{'current_page_name': 'foo/3'},
])
self.assertQuerysetEqual(self.release.documents.all(), [])
def test_excluded_documents(self):
"""
Documents aren't created for partially translated documents excluded
from robots indexing.
"""
# Read the first Disallow line of robots.txt.
robots_path = settings.BASE_DIR.joinpath('djangoproject', 'static', 'robots.docs.txt')
with open(str(robots_path), 'r') as fh:
for line in fh:
if line.startswith("Disallow:"):
break
_, lang, version, path = line.strip().split('/')
release = DocumentRelease.objects.create(
lang=lang, release=Release.objects.create(version=version),
)
release.sync_to_db([
{'body': '', 'title': '', 'current_page_name': 'nonexcluded/bar'},
{'body': '', 'title': '', 'current_page_name': '%s/bar' % path},
])
self.assertQuerysetEqual(
release.documents.all(),
['<Document: %s/%s/nonexcluded/bar>' % (lang, version)]
)
class SitemapTests(TestCase):
fixtures = ['doc_test_fixtures']
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_sitemap_index(self):
response = self.client.get('/sitemap.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertContains(response, '<sitemap>', count=2)
self.assertContains(response, '<loc>http://docs.djangoproject.dev:8000/sitemap-en.xml</loc>')
def test_sitemap(self):
doc_release = DocumentRelease.objects.create()
document = Document.objects.create(release=doc_release)
sitemap = DocsSitemap('en')
urls = sitemap.get_urls()
self.assertEqual(len(urls), 1)
url_info = urls[0]
self.assertEqual(url_info['location'], document.get_absolute_url())
def test_sitemap_404(self):
response = self.client.get('/sitemap-xx.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.context['exception'],
"No sitemap available for section: 'xx'"
)
class DocumentManagerTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.release = DocumentRelease.objects.create()
documents = [
{
'metadata': {
'body': (
'<div class="section" id="s-generic-views">\n<span id="generic-views"></span>'
'<h1>Generic views<a class="headerlink" href="#generic-views" title="Permalink to this headline">¶</a></h1>\n'
'<p>See <a class="reference internal" href="../../../ref/class-based-views/">'
'<span class="doc">Built-in class-based views API</span></a>.</p>\n</div>\n'
),
'breadcrumbs': [],
'parents': 'topics http',
'slug': 'generic-views',
'title': 'Generic views',
'toc': '<ul>\n<li><a class="reference internal" href="#">Generic views</a></li>\n</ul>\n'
},
'path': 'topics/http/generic-views',
'release': cls.release,
'title': 'Generic views',
},
{
'metadata': {
'body': (
'<div class="section" id="s-django-1-2-1-release-notes">\n<span id="django-1-2-1-release-notes"></span>'
'<h1>Django 1.2.1 release notes<a class="headerlink" href="#django-1-2-1-release-notes" title="Permalink to this headline">¶</a></h1>\n'
'<p>Django 1.2.1 was released almost immediately after 1.2.0 to correct two small\n'
'bugs: one was in the documentation packaging script, the other was a '
'<a class="reference external" href="https://code.djangoproject.com/ticket/13560">bug</a> that\n'
'affected datetime form field widgets when localization was enabled.</p>\n</div>\n'
),
'breadcrumbs': [],
'parents': 'releases',
'slug': '1.2.1',
'title': 'Django 1.2.1 release notes',
'toc': '<ul>\n<li><a class="reference internal" href="#">Django 1.2.1 release notes</a></li>\n</ul>\n'
},
'path': 'releases/1.2.1',
'release': cls.release,
'title': 'Django 1.2.1 release notes'
},
{
'metadata': {
'body': (
'<div class="section" id="s-django-1-9-4-release-notes">\n<span id="django-1-9-4-release-notes"></span>'
'<h1>Django 1.9.4 release notes<a class="headerlink" href="#django-1-9-4-release-notes" title="Permalink to this headline">¶</a></h1>\n'
'<p><em>March 5, 2016</em></p>\n<p>Django 1.9.4 fixes a regression on Python 2 in the 1.9.3 security release\n'
'where <code class="docutils literal"><span class="pre">utils.http.is_safe_url()</span></code> crashes on bytestring URLs '
'(<a class="reference external" href="https://code.djangoproject.com/ticket/26308">#26308</a>).</p>\n</div>\n'
),
'breadcrumbs': [],
'parents': 'releases',
'slug': '1.9.4',
'title': 'Django 1.9.4 release notes',
'toc': '<ul>\n<li><a class="reference internal" href="#">Django 1.9.4 release notes</a></li>\n</ul>\n'
},
'path': 'releases/1.9.4',
'release': cls.release,
'title': 'Django 1.9.4 release notes'
}
]
Document.objects.bulk_create(((Document(**doc) for doc in documents)))
Document.objects.update(search=DOCUMENT_SEARCH_VECTOR)
def test_search(self):
query_text = 'django'
document_queryset = Document.objects.search(query_text, self.release).values_list('title', 'rank')
document_list = [('Django 1.2.1 release notes', 0.969828), ('Django 1.9.4 release notes', 0.949088)]
self.assertSequenceEqual(list(document_queryset), document_list)
def test_empty_search(self):
self.assertSequenceEqual(Document.objects.search('', self.release), [])
avg_line_length: 40.154229 | max_line_length: 160 | alphanum_fraction: 0.566411

hexsha: 3b02762f52803cee0cc5739197e72e8f7a8c1b16 | size: 5,891 | ext: py | lang: Python
path: huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/part_attendee.py | repo: huaweicloud/huaweicloud-sdk-python-v3 @ 7a6270390fcbf192b3882bf763e7016e6026ef78 | licenses: ["Apache-2.0"] (shared by the stars, issues, and forks columns)
stars: 64 (2020-06-12T07:05:07.000Z to 2022-03-30T03:32:50.000Z) | issues: 11 (2020-07-06T07:56:54.000Z to 2022-01-11T11:14:40.000Z) | forks: 24 (2020-06-08T11:42:13.000Z to 2022-03-04T06:44:08.000Z)
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PartAttendee:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'phone': 'str',
'phone2': 'str',
'phone3': 'str',
'type': 'str'
}
attribute_map = {
'name': 'name',
'phone': 'phone',
'phone2': 'phone2',
'phone3': 'phone3',
'type': 'type'
}
def __init__(self, name=None, phone=None, phone2=None, phone3=None, type=None):
"""PartAttendee - a model defined in huaweicloud sdk"""
self._name = None
self._phone = None
self._phone2 = None
self._phone3 = None
self._type = None
self.discriminator = None
if name is not None:
self.name = name
if phone is not None:
self.phone = phone
if phone2 is not None:
self.phone2 = phone2
if phone3 is not None:
self.phone3 = phone3
if type is not None:
self.type = type
@property
def name(self):
"""Gets the name of this PartAttendee.
        Attendee name or nickname. Limited to 96 characters.
:return: The name of this PartAttendee.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PartAttendee.
        Attendee name or nickname. Limited to 96 characters.
:param name: The name of this PartAttendee.
:type: str
"""
self._name = name
@property
def phone(self):
"""Gets the phone of this PartAttendee.
        Phone number (SIP and TEL number formats supported), at most 127 characters. When type is telepresence and the device is a triple-screen telepresence unit, this field holds the center-screen number.
:return: The phone of this PartAttendee.
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this PartAttendee.
        Phone number (SIP and TEL number formats supported), at most 127 characters. When type is telepresence and the device is a triple-screen telepresence unit, this field holds the center-screen number.
:param phone: The phone of this PartAttendee.
:type: str
"""
self._phone = phone
@property
def phone2(self):
"""Gets the phone2 of this PartAttendee.
        Same value type as the phone parameter (reserved field). When type is telepresence and the device is a triple-screen telepresence unit, this field holds the left-screen number.
:return: The phone2 of this PartAttendee.
:rtype: str
"""
return self._phone2
@phone2.setter
def phone2(self, phone2):
"""Sets the phone2 of this PartAttendee.
        Same value type as the phone parameter (reserved field). When type is telepresence and the device is a triple-screen telepresence unit, this field holds the left-screen number.
:param phone2: The phone2 of this PartAttendee.
:type: str
"""
self._phone2 = phone2
@property
def phone3(self):
"""Gets the phone3 of this PartAttendee.
        Same value type as the phone parameter (reserved field). When type is telepresence and the device is a triple-screen telepresence unit, this field holds the right-screen number.
:return: The phone3 of this PartAttendee.
:rtype: str
"""
return self._phone3
@phone3.setter
def phone3(self, phone3):
"""Sets the phone3 of this PartAttendee.
        Same value format as the phone parameter (reserved field). When type is telepresence and the device is a three-screen telepresence unit, this field holds the right-screen number.
:param phone3: The phone3 of this PartAttendee.
:type: str
"""
self._phone3 = phone3
@property
def type(self):
"""Gets the type of this PartAttendee.
        The default value is defined by the conference AS. Number type enumeration: - normal: soft client. - telepresence: telepresence unit; single-screen and three-screen telepresence both belong to this type (reserved field). - terminal: meeting room or hard terminal. - outside: external attendee. - mobile: user's mobile number. - telephone: user's landline number (reserved field).
:return: The type of this PartAttendee.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this PartAttendee.
        The default value is defined by the conference AS. Number type enumeration: - normal: soft client. - telepresence: telepresence unit; single-screen and three-screen telepresence both belong to this type (reserved field). - terminal: meeting room or hard terminal. - outside: external attendee. - mobile: user's mobile number. - telephone: user's landline number (reserved field).
:param type: The type of this PartAttendee.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PartAttendee):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
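# --- Hedged usage sketch (not part of the original SDK file; added for illustration) ---
# Shows how a generated model such as PartAttendee is typically built and serialized.
# The attribute values are made-up assumptions; to_str()/__repr__ additionally assume the
# huaweicloudsdkcore and simplejson dependencies are installed.
if __name__ == "__main__":
    attendee = PartAttendee(name="Alice", phone="+8612345678901", type="normal")
    print(attendee.to_dict())   # plain dict with unset fields left as None
    print(attendee)             # JSON string produced via to_str()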
| 26.41704
| 165
| 0.556442
|
2dfa62a07afa7882df838ce65ca1ba9d85be1b93
| 2,274
|
py
|
Python
|
pfcon/mount_dir.py
|
jbernal0019/pfcon
|
752e3871b2e1bc3ed5227fad588e8371abc17c51
|
[
"MIT"
] | 7
|
2017-10-15T23:18:34.000Z
|
2020-10-25T07:20:47.000Z
|
pfcon/mount_dir.py
|
jbernal0019/pfcon
|
752e3871b2e1bc3ed5227fad588e8371abc17c51
|
[
"MIT"
] | 47
|
2017-12-06T19:07:50.000Z
|
2022-03-11T03:31:49.000Z
|
pfcon/mount_dir.py
|
jbernal0019/pfcon
|
752e3871b2e1bc3ed5227fad588e8371abc17c51
|
[
"MIT"
] | 18
|
2017-11-17T17:48:29.000Z
|
2021-04-23T01:52:48.000Z
|
"""
Handle MountDir file storage.
"""
import logging
import datetime
import zipfile
import os
import io
import shutil
logger = logging.getLogger(__name__)
class MountDir:
def __init__(self, config=None):
self.config = config
def store_data(self, job_id, job_incoming_dir, input_stream):
"""
Unpack and store the files/directories in the input zip stream at the specified
incoming directory.
"""
with zipfile.ZipFile(input_stream, 'r', zipfile.ZIP_DEFLATED) as job_zip:
filenames = job_zip.namelist()
nfiles = len(filenames)
logger.info(f'{nfiles} files to decompress for job {job_id}')
job_zip.extractall(path=job_incoming_dir)
return {
'jid': job_id,
'nfiles': nfiles,
'timestamp': f'{datetime.datetime.now()}',
'path': job_incoming_dir
}
def get_data(self, job_id, job_outgoing_dir):
"""
Create job zip file ready for transmission to a remote origin from the
outgoing directory.
"""
memory_zip_file = io.BytesIO()
nfiles = 0
with zipfile.ZipFile(memory_zip_file, 'w', zipfile.ZIP_DEFLATED) as job_zip:
for root, dirs, files in os.walk(job_outgoing_dir):
for filename in files:
local_file_path = os.path.join(root, filename)
if not os.path.islink(local_file_path):
arc_file_path = os.path.relpath(local_file_path, job_outgoing_dir)
try:
with open(local_file_path, 'rb') as f:
job_zip.writestr(arc_file_path, f.read())
except Exception as e:
logger.error(f'Failed to read file {local_file_path} for '
f'job {job_id}, detail: {str(e)}')
else:
nfiles += 1
memory_zip_file.seek(0)
logger.info(f'{nfiles} files compressed for job {job_id}')
return memory_zip_file
def delete_data(self, job_dir):
"""
Delete job data from the store.
"""
shutil.rmtree(job_dir)
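# --- Hedged usage sketch (not part of the original pfcon module; added for illustration) ---
# Round-trips a tiny in-memory zip through store_data() and get_data(); the job id and
# temporary paths below are illustrative assumptions, not values from the source.
if __name__ == "__main__":
    import tempfile
    incoming = io.BytesIO()
    with zipfile.ZipFile(incoming, 'w', zipfile.ZIP_DEFLATED) as z:
        z.writestr('data/input.txt', 'hello')            # one small file to unpack
    incoming.seek(0)
    job_dir = os.path.join(tempfile.mkdtemp(), 'incoming')
    md = MountDir()
    print(md.store_data('job-1', job_dir, incoming))     # unpacks into job_dir
    out_zip = md.get_data('job-1', job_dir)              # re-zips the same directory
    print(len(out_zip.getvalue()), 'bytes in outgoing zip')
    md.delete_data(os.path.dirname(job_dir))             # clean up the temp tree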
| 32.956522
| 90
| 0.559807
|
c861d25afd13ffb77f601902e2328eaaaad939d5
| 44,142
|
py
|
Python
|
.venv/lib/python3.8/site-packages/pandas/core/arrays/interval.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 115
|
2020-06-18T15:00:58.000Z
|
2022-03-02T10:13:19.000Z
|
.venv/lib/python3.8/site-packages/pandas/core/arrays/interval.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 21
|
2021-04-13T01:17:40.000Z
|
2022-03-11T16:06:50.000Z
|
.venv/lib/python3.8/site-packages/pandas/core/arrays/interval.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 60
|
2020-07-22T14:53:10.000Z
|
2022-03-23T10:17:59.000Z
|
from operator import le, lt
import textwrap
import numpy as np
from pandas._config import get_option
from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_float_dtype,
is_integer_dtype,
is_interval,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
ABCIndexClass,
ABCIntervalIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.algorithms import take, value_counts
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.construction import array
from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index
_VALID_CLOSED = {"left", "right", "both", "neither"}
_interval_shared_docs = {}
_shared_docs_kwargs = dict(
klass="IntervalArray", qualname="arrays.IntervalArray", name=""
)
_interval_shared_docs[
"class"
] = """
%(summary)s
.. versionadded:: %(versionadded)s
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
%(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
.. versionadded:: 0.23.0
copy : bool, default False
Copy the input data.
%(name)s\
verify_integrity : bool, default True
Verify that the %(klass)s is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
%(extra_attributes)s\
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
%(extra_methods)s\
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an %(klass)s.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_
for more.
%(examples)s\
"""
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalArray",
summary="Pandas array for interval data that are closed on the same side.",
versionadded="0.24.0",
name="",
extra_attributes="",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, closed: right, dtype: interval[int64]
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""
),
)
)
class IntervalArray(IntervalMixin, ExtensionArray):
ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
verify_integrity: bool = True,
):
if isinstance(data, ABCSeries) and is_interval_dtype(data.dtype):
data = data._values
if isinstance(data, (cls, ABCIntervalIndex)):
left = data.left
right = data.right
closed = closed or data.closed
else:
# don't allow scalars
if is_scalar(data):
msg = (
f"{cls.__name__}(...) must be called with a collection "
f"of some kind, {data} was passed"
)
raise TypeError(msg)
# might need to convert empty or purely na data
data = maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None
)
closed = closed or infer_closed
return cls._simple_new(
left,
right,
closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
@classmethod
def _simple_new(
cls, left, right, closed=None, copy=False, dtype=None, verify_integrity=True
):
result = IntervalMixin.__new__(cls)
closed = closed or "right"
left = ensure_index(left, copy=copy)
right = ensure_index(right, copy=copy)
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if not is_interval_dtype(dtype):
msg = f"dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg)
elif dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = (
f"must not have differing left [{type(left).__name__}] and "
f"right [{type(right).__name__}] types"
)
raise ValueError(msg)
elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalArray"
)
raise TypeError(msg)
elif isinstance(left, ABCPeriodIndex):
msg = "Period dtypes are not supported, use a PeriodIndex instead"
raise ValueError(msg)
elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
msg = (
"left and right must have the same time zone, got "
f"'{left.tz}' and '{right.tz}'"
)
raise ValueError(msg)
result._left = left
result._right = right
result._closed = closed
if verify_integrity:
result._validate()
return result
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values, original):
if len(values) == 0:
# An empty array returns object-dtype here. We can't create
# a new IA from an (empty) object-dtype array, so turn it into the
# correct dtype.
values = values.astype(original.dtype.subtype)
return cls(values, closed=original.closed)
_interval_shared_docs["from_breaks"] = textwrap.dedent(
"""
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
If None, dtype will be inferred.
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
_interval_shared_docs["from_arrays"] = textwrap.dedent(
"""
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
return cls._simple_new(
left, right, closed, copy=copy, dtype=dtype, verify_integrity=True
)
_interval_shared_docs["from_tuples"] = textwrap.dedent(
"""
Construct an %(klass)s from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
By-default copy the data, this is compat only and ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
.. versionadded:: 0.23.0
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
<IntervalArray>
[(0, 1], (1, 2]]
Length: 2, closed: right, dtype: interval[int64]
"""
),
)
)
def from_tuples(cls, data, closed="right", copy=False, dtype=None):
if len(data):
left, right = [], []
else:
# ensure that empty data keeps input dtype
left = right = data
for d in data:
if isna(d):
lhs = rhs = np.nan
else:
name = cls.__name__
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError as err:
msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
raise ValueError(msg) from err
except TypeError as err:
msg = f"{name}.from_tuples received an invalid item, {d}"
raise TypeError(msg) from err
left.append(lhs)
right.append(rhs)
return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
def _validate(self):
"""
Verify that the IntervalArray is valid.
Checks that
* closed is valid
* left and right match lengths
* left and right have the same missing values
* left is always below right
"""
if self.closed not in _VALID_CLOSED:
msg = f"invalid option for 'closed': {self.closed}"
raise ValueError(msg)
if len(self.left) != len(self.right):
msg = "left and right must have the same length"
raise ValueError(msg)
left_mask = notna(self.left)
right_mask = notna(self.right)
if not (left_mask == right_mask).all():
msg = (
"missing values must be missing in the same "
"location both left and right sides"
)
raise ValueError(msg)
if not (self.left[left_mask] <= self.right[left_mask]).all():
msg = "left side of interval must be <= right side"
raise ValueError(msg)
# ---------
# Interface
# ---------
def __iter__(self):
return iter(np.asarray(self))
def __len__(self) -> int:
return len(self.left)
def __getitem__(self, value):
value = check_array_indexer(self, value)
left = self.left[value]
right = self.right[value]
# scalar
if not isinstance(left, ABCIndexClass):
if is_scalar(left) and isna(left):
return self._fill_value
if np.ndim(left) > 1:
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
return Interval(left, right, self.closed)
return self._shallow_copy(left, right)
def __setitem__(self, key, value):
# na value: need special casing to set directly on numpy arrays
needs_float_conversion = False
if is_scalar(value) and isna(value):
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
needs_float_conversion = True
elif is_datetime64_any_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.datetime64("NaT")
elif is_timedelta64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.timedelta64("NaT")
value_left, value_right = value, value
# scalar interval
elif is_interval_dtype(value) or isinstance(value, Interval):
self._check_closed_matches(value, name="value")
value_left, value_right = value.left, value.right
else:
# list-like of intervals
try:
array = IntervalArray(value)
value_left, value_right = array.left, array.right
except TypeError as err:
# wrong type: not interval or NA
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg) from err
if needs_float_conversion:
raise ValueError("Cannot set float NaN to integer-backed IntervalArray")
key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
# forced to copy, update the copy, and swap in the new values.
left = self.left.copy(deep=True)
left._values[key] = value_left
self._left = left
right = self.right.copy(deep=True)
right._values[key] = value_right
self._right = right
def __eq__(self, other):
# ensure pandas array for list-like and eliminate non-interval scalars
if is_list_like(other):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
other = array(other)
elif not isinstance(other, Interval):
# non-interval scalar -> no matches
return np.zeros(len(self), dtype=bool)
# determine the dtype of the elements we want to compare
if isinstance(other, Interval):
other_dtype = pandas_dtype("interval")
elif not is_categorical_dtype(other.dtype):
other_dtype = other.dtype
else:
# for categorical defer to categories for dtype
other_dtype = other.categories.dtype
# extract intervals if we have interval categories with matching closed
if is_interval_dtype(other_dtype):
if self.closed != other.categories.closed:
return np.zeros(len(self), dtype=bool)
other = other.categories.take(other.codes)
# interval-like -> need same closed and matching endpoints
if is_interval_dtype(other_dtype):
if self.closed != other.closed:
return np.zeros(len(self), dtype=bool)
return (self.left == other.left) & (self.right == other.right)
# non-interval/non-object dtype -> no matches
if not is_object_dtype(other_dtype):
return np.zeros(len(self), dtype=bool)
# object dtype -> iteratively check for intervals
result = np.zeros(len(self), dtype=bool)
for i, obj in enumerate(other):
# need object to be an Interval with same closed and endpoints
if (
isinstance(obj, Interval)
and self.closed == obj.closed
and self.left[i] == obj.left
and self.right[i] == obj.right
):
result[i] = True
return result
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
(Not implemented yet for IntervalArray)
Method to use for filling holes in reindexed Series
limit : int, default None
(Not implemented yet for IntervalArray)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
raise TypeError("Filling by method is not supported for IntervalArray.")
if limit is not None:
raise TypeError("limit is not supported for IntervalArray.")
if not isinstance(value, Interval):
msg = (
"'IntervalArray.fillna' only supports filling with a "
f"scalar 'pandas.Interval'. Got a '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._check_closed_matches(value, name="value")
left = self.left.fillna(value=value.left)
right = self.right.fillna(value=value.right)
return self._shallow_copy(left, right)
@property
def dtype(self):
return IntervalDtype(self.left.dtype)
def astype(self, dtype, copy=True):
"""
Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ExtensionArray or ndarray
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
from pandas.core.arrays.string_ import StringDtype
if dtype is not None:
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
if dtype == self.dtype:
return self.copy() if copy else self
# need to cast to different subtype
try:
new_left = self.left.astype(dtype.subtype)
new_right = self.right.astype(dtype.subtype)
except TypeError as err:
msg = (
f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
)
raise TypeError(msg) from err
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self))
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError) as err:
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg) from err
@classmethod
def _concat_same_type(cls, to_concat):
"""
Concatenate multiple IntervalArray
Parameters
----------
to_concat : sequence of IntervalArray
Returns
-------
IntervalArray
"""
closed = {interval.closed for interval in to_concat}
if len(closed) != 1:
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
return cls._simple_new(left, right, closed=closed, copy=False)
def _shallow_copy(self, left, right):
"""
Return a new IntervalArray with the replacement attributes
Parameters
----------
left : Index
Values to be used for the left-side of the intervals.
right : Index
Values to be used for the right-side of the intervals.
"""
return self._simple_new(left, right, closed=self.closed, verify_integrity=False)
def copy(self):
"""
Return a copy of the array.
Returns
-------
IntervalArray
"""
left = self.left.copy(deep=True)
right = self.right.copy(deep=True)
closed = self.closed
# TODO: Could skip verify_integrity here.
return type(self).from_arrays(left, right, closed=closed)
def isna(self):
return isna(self.left)
@property
def nbytes(self) -> int:
return self.left.nbytes + self.right.nbytes
@property
def size(self) -> int:
# Avoid materializing self.values
return self.left.size
def shift(self, periods: int = 1, fill_value: object = None) -> "IntervalArray":
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
# ExtensionArray.shift doesn't work for two reasons
# 1. IntervalArray.dtype.na_value may not be correct for the dtype.
# 2. IntervalArray._from_sequence only accepts NaN for missing values,
# not other values like NaT
empty_len = min(abs(periods), len(self))
if isna(fill_value):
fill_value = self.left._na_value
empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
else:
empty = self._from_sequence([fill_value] * empty_len)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods) :]
b = empty
return self._concat_same_type([a, b])
def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs):
"""
Take elements from the IntervalArray.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
fill_value : Interval or NA, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
axis : any, default None
Present for compat with IntervalIndex; does nothing.
Returns
-------
IntervalArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
nv.validate_take(tuple(), kwargs)
fill_left = fill_right = fill_value
if allow_fill:
if fill_value is None:
fill_left = fill_right = self.left._na_value
elif is_interval(fill_value):
self._check_closed_matches(fill_value, name="fill_value")
fill_left, fill_right = fill_value.left, fill_value.right
elif not is_scalar(fill_value) and notna(fill_value):
msg = (
"'IntervalArray.fillna' only supports filling with a "
"'scalar pandas.Interval or NA'. "
f"Got a '{type(fill_value).__name__}' instead."
)
raise ValueError(msg)
left_take = take(
self.left, indices, allow_fill=allow_fill, fill_value=fill_left
)
right_take = take(
self.right, indices, allow_fill=allow_fill, fill_value=fill_right
)
return self._shallow_copy(left_take, right_take)
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each interval.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
        # TODO: implement this in a non-naive way!
return value_counts(np.asarray(self), dropna=dropna)
# Formatting
def _format_data(self):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = f"[{first}]"
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = f"[{first}, {last}]"
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
head_str = ", ".join(head)
tail_str = ", ".join(tail)
summary = f"[{head_str} ... {tail_str}]"
else:
tail = [formatter(x) for x in self]
tail_str = ", ".join(tail)
summary = f"[{tail_str}]"
return summary
def __repr__(self) -> str:
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = self._format_data()
class_name = f"<{type(self).__name__}>\n"
template = (
f"{class_name}"
f"{data}\n"
f"Length: {len(self)}, closed: {self.closed}, dtype: {self.dtype}"
)
return template
def _format_space(self):
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
an Index.
"""
return self._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
an Index.
"""
return self._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
"""
return self._closed
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
Return an %(klass)s identical to the current one, but closed on the
specified side.
.. versionadded:: 0.24.0
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
new_index : %(klass)s
%(examples)s\
"""
)
@Appender(
_interval_shared_docs["set_closed"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
Examples
--------
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, closed: right, dtype: interval[int64]
>>> index.set_closed('both')
<IntervalArray>
[[0, 1], [1, 2], [2, 3]]
Length: 3, closed: both, dtype: interval[int64]
"""
),
)
)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)
return type(self)._simple_new(
left=self.left, right=self.right, closed=closed, verify_integrity=False
)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalArray.
"""
try:
return self.right - self.left
except TypeError as err:
# length not defined for some types, e.g. string
msg = (
"IntervalArray contains Intervals without defined length, "
"e.g. Intervals with string endpoints"
)
raise TypeError(msg) from err
@property
def mid(self):
"""
Return the midpoint of each Interval in the IntervalArray as an Index.
"""
try:
return 0.5 * (self.left + self.right)
except TypeError:
# datetime safe version
return self.left + 0.5 * self.length
_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
Return True if the %(klass)s is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
else False.
"""
# https://github.com/python/mypy/issues/1362
# Mypy does not support decorated properties
@property # type: ignore
@Appender(
_interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
)
def is_non_overlapping_monotonic(self):
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == "both":
return bool(
(self.right[:-1] < self.left[1:]).all()
or (self.left[:-1] > self.right[1:]).all()
)
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool(
(self.right[:-1] <= self.left[1:]).all()
or (self.left[:-1] >= self.right[1:]).all()
)
# Conversion
def __array__(self, dtype=None) -> np.ndarray:
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
right = self.right
mask = self.isna()
closed = self._closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowIntervalType
try:
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError as err:
raise TypeError(
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
"is not supported"
) from err
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
[
pyarrow.array(self.left, type=subtype, from_pandas=True),
pyarrow.array(self.right, type=subtype, from_pandas=True),
],
names=["left", "right"],
)
mask = self.isna()
if mask.any():
# if there are missing values, set validity bitmap also on the array level
null_bitmap = pyarrow.array(~mask).buffers()[1]
storage_array = pyarrow.StructArray.from_buffers(
storage_array.type,
len(storage_array),
[null_bitmap],
children=[storage_array.field(0), storage_array.field(1)],
)
if type is not None:
if type.equals(interval_type.storage_type):
return storage_array
elif isinstance(type, ArrowIntervalType):
# ensure we have the same subtype and closed attributes
if not type.equals(interval_type):
raise TypeError(
"Not supported to convert IntervalArray to type with "
f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
f"and 'closed' ({self.closed} vs {type.closed}) attributes"
)
else:
raise TypeError(
f"Not supported to convert IntervalArray to '{type}' type"
)
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
_interval_shared_docs[
"to_tuples"
] = """
Return an %(return_type)s of tuples of the form (left, right).
Parameters
----------
na_tuple : bool, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
.. versionadded:: 0.23.0
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
@Appender(
_interval_shared_docs["to_tuples"] % dict(return_type="ndarray", examples="")
)
def to_tuples(self, na_tuple=True):
tuples = com.asarray_tuplesafe(zip(self.left, self.right))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
left_repeat = self.left.repeat(repeats)
right_repeat = self.right.repeat(repeats)
return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs["contains"] = textwrap.dedent(
"""
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the %(klass)s.
.. versionadded:: 0.25.0
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
See Also
--------
Interval.contains : Check whether Interval object contains value.
%(klass)s.overlaps : Check if an Interval overlaps the values in the
%(klass)s.
Examples
--------
%(examples)s
>>> intervals.contains(0.5)
array([ True, False, False])
"""
)
@Appender(
_interval_shared_docs["contains"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def contains(self, other):
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
return (self.left < other if self.open_left else self.left <= other) & (
other < self.right if self.open_right else other <= self.right
)
_interval_shared_docs["overlaps"] = textwrap.dedent(
"""
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Parameters
----------
other : %(klass)s
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
%(examples)s
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
"""
)
@Appender(
_interval_shared_docs["overlaps"]
% dict(
klass="IntervalArray",
examples=textwrap.dedent(
"""\
>>> data = [(0, 1), (1, 3), (2, 4)]
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, closed: right, dtype: interval[int64]
"""
),
)
)
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
elif not isinstance(other, Interval):
msg = f"`other` must be Interval-like, got {type(other).__name__}"
raise TypeError(msg)
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
        # overlaps is equivalent to the negation of the two intervals being disjoint:
# disjoint = (A.left > B.right) or (B.left > A.right)
# (simplifying the negation allows this to be done in less operations)
return op1(self.left, other.right) & op2(other.left, self.right)
def maybe_convert_platform_interval(values):
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif is_categorical_dtype(values):
values = np.asarray(values)
return maybe_convert_platform(values)
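# --- Hedged usage sketch (not part of the pandas source; added for illustration) ---
# Small demonstration of behaviours described above that have no inline doctest:
# shift() pads with an NA interval, and maybe_convert_platform_interval() coerces an
# empty list to int64 rather than object dtype. Assumes this module imports cleanly
# against a matching pandas build.
if __name__ == "__main__":
    arr = IntervalArray.from_breaks([0, 1, 2, 3])
    print(arr)                                        # [(0, 1], (1, 2], (2, 3]]
    print(arr.shift(1))                               # first element becomes NaN
    print(maybe_convert_platform_interval([]).dtype)  # int64, not object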
| 32.819331
| 88
| 0.576141
|
d19fda0dcdc3984b3a9d81166bf1bd436efa6890
| 3,900
|
py
|
Python
|
fold.py
|
gmayday1997/pytorch_CAM
|
c51a0c7f7701005b8f031ed9a0f9b3b9680cf560
|
[
"MIT"
] | 23
|
2018-02-13T00:50:11.000Z
|
2021-02-04T01:49:34.000Z
|
fold.py
|
gmayday1997/pytorch-CAM
|
c51a0c7f7701005b8f031ed9a0f9b3b9680cf560
|
[
"MIT"
] | null | null | null |
fold.py
|
gmayday1997/pytorch-CAM
|
c51a0c7f7701005b8f031ed9a0f9b3b9680cf560
|
[
"MIT"
] | 5
|
2017-12-19T10:48:22.000Z
|
2021-02-04T01:49:35.000Z
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def find_classes(dir):
classes = [int(d) for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
new_classes = [str(cl) for cl in classes]
class_to_idx = {new_classes[i]: i for i in range(len(new_classes))}
return new_classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(data.Dataset):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader):
classes, class_to_idx = find_classes(root)
imgs = make_dataset(root, class_to_idx)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
#self.debug_img = self.imgs[600:900]
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
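# --- Hedged usage sketch (not part of the original fold.py; added for illustration) ---
# Typical wiring of this ImageFolder variant into a DataLoader. The root path and the
# transform pipeline are illustrative assumptions; note that find_classes() casts
# subfolder names to int before sorting, so class directories must be named numerically
# (e.g. root/0/, root/1/, ...).
if __name__ == "__main__":
    from torchvision import transforms
    from torch.utils.data import DataLoader
    dataset = ImageFolder('path/to/train',               # hypothetical dataset root
                          transform=transforms.Compose([
                              transforms.Resize((224, 224)),
                              transforms.ToTensor(),
                          ]))
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    images, targets = next(iter(loader))
    print(images.shape, targets[:5])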
| 30
| 101
| 0.614615
|
bd2274350c0289c2a1f28ce9b475e28ba9958ccc
| 2,085
|
py
|
Python
|
src/candy/log.py
|
mongmong/python-candy
|
ead8b02cd8cf82314fec0c40aff7c8d32cf7c855
|
[
"BSD-2-Clause"
] | null | null | null |
src/candy/log.py
|
mongmong/python-candy
|
ead8b02cd8cf82314fec0c40aff7c8d32cf7c855
|
[
"BSD-2-Clause"
] | null | null | null |
src/candy/log.py
|
mongmong/python-candy
|
ead8b02cd8cf82314fec0c40aff7c8d32cf7c855
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
'''
sweety.log
@author: Chris Chou <m2chrischou AT gmail.com>
@description:
Environment variables:
SWEETY_VERBOSE - turn on verbose mode.
SWEETY_LOG_FILENAME - specify the log file.
'''
from datetime import datetime
import logging
import os
import sys
from sweety import logstatus, util
_start_time = datetime.now()
_logfile_formatter = logging.Formatter(
'%(asctime)s - %(name)s | %(levelname)s: [%(filename)s:%(lineno)d] - %(message)s'
)
_console_formatter = logging.Formatter(
'%(asctime)s - %(message)s | %(levelname)s: [%(filename)s:%(lineno)d]'
)
def get_logger(name_or_self):
'''
get_logger(name_or_self) -> Logger
Gets logger with specified name.
@param name: the logger name or self.
@return: the logger object.
'''
lockname = 'sweety.log.%s.lock' % _start_time.strftime('%Y-%m-%d')
lock = util.FileLock(lockname)
lock.lock()
if not isinstance(name_or_self, (str, unicode)):
name_or_self = name_or_self.__class__.__name__
log = logging.getLogger(name_or_self)
log.setLevel(logging.DEBUG)
if not log.handlers:
buf = logging.StreamHandler(logstatus._content)
buf.setFormatter(_logfile_formatter)
log.addHandler(buf)
buf.setLevel(logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(_console_formatter)
log.addHandler(console)
if os.environ.has_key('SWEETY_VERBOSE') and os.environ['SWEETY_VERBOSE']:
console.setLevel(logging.INFO)
else:
console.setLevel(logging.WARNING)
if os.environ.has_key('SWEETY_LOG_FILENAME'):
fn = os.environ['SWEETY_LOG_FILENAME']
fdir = os.path.dirname(fn)
fdir = os.path.join(fdir, _start_time.strftime('%Y-%m-%d'))
if not os.path.exists(fdir):
os.makedirs(fdir)
fn = '%s/%s.%s.%d' % (
fdir,
os.path.basename(fn),
_start_time.strftime('%Y-%m-%d_%H:%M:%S'),
os.getpid()
)
logfile = logging.FileHandler(fn)
logfile.setFormatter(_logfile_formatter)
log.addHandler(logfile)
logfile.setLevel(logging.DEBUG)
return log
logging.root = get_logger('root')
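# --- Hedged usage sketch (not part of the original module; added for illustration) ---
# Typical use of get_logger(); behaviour follows the SWEETY_VERBOSE and
# SWEETY_LOG_FILENAME environment variables documented above. The values below are
# illustrative assumptions, and the module itself targets Python 2 (it relies on
# `unicode` and `has_key`) with the sweety package importable.
if __name__ == '__main__':
    os.environ['SWEETY_VERBOSE'] = '1'                         # INFO and above to stderr
    os.environ['SWEETY_LOG_FILENAME'] = '/tmp/sweety/app.log'  # per-day DEBUG log file
    log = get_logger('example')
    log.info('started')
    log.warning('something to look at')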
| 24.529412
| 90
| 0.701199
|
e1743454756530eac768df6ada9f5c6534f8269a
| 1,023
|
py
|
Python
|
scripts/openstack/manifest-tests/server-groups/test_machinesets.py
|
saqibali-2k/installer
|
170a59be823af62f760632b2611538b9022ec174
|
[
"Apache-2.0"
] | 1,369
|
2018-06-08T15:15:34.000Z
|
2022-03-31T11:58:28.000Z
|
scripts/openstack/manifest-tests/server-groups/test_machinesets.py
|
Montana/installer
|
9eade28a9ce4862a6ef092bc5f5fcfb499342d4d
|
[
"Apache-2.0"
] | 5,738
|
2018-06-08T19:17:30.000Z
|
2022-03-31T23:54:17.000Z
|
scripts/openstack/manifest-tests/server-groups/test_machinesets.py
|
Montana/installer
|
9eade28a9ce4862a6ef092bc5f5fcfb499342d4d
|
[
"Apache-2.0"
] | 1,247
|
2018-06-08T17:05:33.000Z
|
2022-03-31T19:34:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import glob
import yaml
ASSETS_DIR = ""
class TestMachinesetsServerGroup(unittest.TestCase):
def setUp(self):
"""Parse the MachineSets into a Python data structure."""
self.machinesets = []
for machineset_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-cluster-api_worker-machineset-*.yaml'
):
with open(machineset_path) as f:
self.machinesets.append(yaml.load(f, Loader=yaml.FullLoader))
def test_consistent_group_names(self):
"""Assert that server group names are unique across machinesets."""
found = []
for machineset in self.machinesets:
name = machineset["spec"]["template"]["spec"]["providerSpec"][
"value"]["serverGroupName"]
self.assertNotIn(name, found)
found.append(name)
if __name__ == '__main__':
ASSETS_DIR = sys.argv.pop()
unittest.main(verbosity=2)
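# --- Hedged usage note (not part of the original test file; added for illustration) ---
# The suite expects the installer assets directory as the last command-line argument,
# which is popped into ASSETS_DIR before unittest takes over, e.g.:
#     python test_machinesets.py /path/to/cluster-assets
# The path above is an illustrative assumption.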
| 28.416667
| 91
| 0.629521
|
8898292bd0c294a80cf64a46eabab27defc980b4
| 180
|
py
|
Python
|
bin/multiline.py
|
calcu16/canary
|
e2bb4444b07226ad3b092f87c73a037921b220d6
|
[
"BSD-2-Clause"
] | 1
|
2018-11-11T00:48:17.000Z
|
2018-11-11T00:48:17.000Z
|
bin/multiline.py
|
calcu16/canary
|
e2bb4444b07226ad3b092f87c73a037921b220d6
|
[
"BSD-2-Clause"
] | null | null | null |
bin/multiline.py
|
calcu16/canary
|
e2bb4444b07226ad3b092f87c73a037921b220d6
|
[
"BSD-2-Clause"
] | 1
|
2018-11-11T00:48:22.000Z
|
2018-11-11T00:48:22.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
from sys import stdin
for line in stdin:
if line[-2] == '\\':
print(line[:-2], end='')
else:
print(line, end='')
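# --- Hedged usage note (not part of the original script; added for illustration) ---
# The filter joins any physical line ending in a backslash with the line that follows,
# e.g. (the printf input is an illustrative assumption):
#     printf 'foo \\\nbar\n' | python multiline.py     # prints: foo bar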
| 20
| 37
| 0.638889
|
0dca5fb9efb314d3ddec07eacb25bd1f506a1ebc
| 11,378
|
py
|
Python
|
files/myModellib.py
|
JessevanKempen/nutils
|
a10ae3ca9f71b81ac5d64179555ef2cddf6658da
|
[
"MIT"
] | null | null | null |
files/myModellib.py
|
JessevanKempen/nutils
|
a10ae3ca9f71b81ac5d64179555ef2cddf6658da
|
[
"MIT"
] | null | null | null |
files/myModellib.py
|
JessevanKempen/nutils
|
a10ae3ca9f71b81ac5d64179555ef2cddf6658da
|
[
"MIT"
] | null | null | null |
from nutils import mesh, function, solver, util, export, cli, testing
import numpy as np, treelog
from CoolProp.CoolProp import PropsSI
import scipy.special as sc
from matplotlib import pyplot as plt
from scipy.stats import norm
from matplotlib import collections, colors
import pandas as pd
# import seaborn as sns
import matplotlib.pyplot as plt
import math
#################### Reservoir model library #########################
#Analytical solutions
def get_p_drawdown(H, φ, K, ct, Q, R, pref, t1):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Initialize domain
# pref = domain[0]
# R = domain[1]
# Compute drawdown pressure
ei = sc.expi(-R ** 2 / (4 * eta * t1))
dp = (Jw * ei / (4 * math.pi * K))
pexd = (pref + dp)
return pexd
def get_p_buildup(H, φ, K, ct, Q, R, pref, t1end, t2):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Initialize domain
# pref = domain[0]
# R = domain[1]
# Compute buildup pressure
eid = sc.expi(-R**2 / (4 * eta * t1end))
eib = sc.expi(-R**2 / (4 * eta * (t2-t1end)-t1end))
dp = (Jw * (eid - eib) / (4 * math.pi * K))
pexb = (pref + dp)
return pexb
def get_T_drawdown(H, φ, K, ct, Q, R, Tref, t1, cpratio, phieff=0, constantjt=2e-7):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Compute drawdown pressure
aconstant = ( cpratio * Jw) / (4 * math.pi * eta)
ei = sc.expi(-R ** 2 / (4 * eta * t1))
dp = (Jw * ei / (4 * math.pi * K))
# Compute drawdown temperature
Tei = sc.expi(-R ** 2 / (4 * eta * t1) - aconstant)
Tex = Tref + (constantjt * dp) - Jw / (4 * math.pi * K) * (phieff - constantjt ) * Tei
return Tex
def get_T_buildup(H, φ, K, ct, Q, R, Tref, t1end, t2, cpratio, cp, ρ, λ, phieff=0, constantjt=2e-7, latetime=60):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Import drawdown temperature
Tex = get_T_drawdown(H, φ, K, ct, Q, R, Tref, t1end, cpratio, phieff=0, constantjt=2e-7)
if (t2-t1end < latetime):
#early-time buildup solution
earlyTei = sc.expi(-R ** 2 / (4 * eta * t2-t1end))
Tex2 = Tex - earlyTei * phieff * Jw / (4 * math.pi * K)
else:
#late-time buildup solution
lateTei = sc.expi(-R**2 * cp * ρ / (4 * λ * t2-t1end))
Tex2 = Tex - lateTei * constantjt * Jw / (4 * math.pi * K)
return Tex2
def get_dp_drawdown(H, φ, K, ct, Q, R, t1):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Initialize domain
# pref = domain[0]
# R = domain[1]
# Compute drawdown gradient pressure
ei = sc.expi(-R ** 2 / (4 * eta * t1))
pgrad = (2 * Jw * ei / (4 * math.pi * K * R))
    return pgrad
def get_dp_buildup(H, φ, K, ct, Q, R, pref, t1end, t2):
# Initialize parameters
Jw = Q / H
eta = K / (φ * ct)
# Initialize domain
# pref = domain[0]
# R = domain[1]
# Compute drawdown gradient pressure
ei = sc.expi(-R ** 2 / (4 * eta * t1))
pgrad = (2 * Jw * ei / (4 * math.pi * K * R))
return pgrad
#Analytical solutions modified for FEA
def panalyticaldrawdown(ns, t1, R):
# Initialize parameters
ns = ns.copy_()
ns.eta = ns.K / (ns.φ * ns.ct)
# Compute drawdown pressure
ei = sc.expi((-R**2 / (4 * ns.eta * t1)).eval())
dp = (ns.Jw * ei / (4 * math.pi * ns.K)).eval()
pexd = (ns.pref + dp).eval()
return pexd
def panalyticalbuildup(ns, t1end, t2, R):
# Initialize parameters
ns = ns.copy_()
ns.eta = ns.K / (ns.φ * ns.ct)
# Compute buildup pressure
eid = sc.expi((-R**2 / (4 * ns.eta * t1end)).eval())
eib = sc.expi((-R**2 / (4 * ns.eta * (t2 - t1end)-t1end)).eval())
dp = (ns.Jw * (eid - eib) / (4 * math.pi * ns.K)).eval()
pexb = (ns.pref + dp).eval()
return pexb
def dpanalyticaldrawdown(ns, t1, R):
ns = ns.copy_()
ns.eta = ns.K / (ns.φ * ns.ct)
ei = sc.expi((-R ** 2 / (4 * ns.eta * t1)).eval())
pgrad = (2 * ns.Jw * ei / (4 * math.pi * ns.K * R)).eval()
return pgrad
def Tanalyticaldrawdown(ns, t1, R):
ns = ns.copy_()
ns.eta = ns.K / (ns.φ * ns.ct)
aconstant = ( ns.cpratio * ns.Jw) / (4 * math.pi * ns.eta)
ei = sc.expi((-R**2 / (4 * ns.eta * t1)).eval())
pressuredif = (-ns.Jw * ei / (4 * math.pi * ns.K)).eval()
Tei = sc.expi((-R**2/(4*ns.eta*t1) - aconstant).eval())
Tex = (ns.Tref - (ns.constantjt * pressuredif) + ns.Jw / (4 * math.pi * ns.K) * (ns.phieff - ns.constantjt ) * Tei).eval()
print("T drawdown FEA", Tex)
return Tex
def Tanalyticalbuildup(ns, endtime, t2, R):
ns = ns.copy_()
constantjt = ns.constantjt
phieff = ns.phieff
ns.eta = ns.K / (ns.φ * ns.ct)
Tex = Tanalyticaldrawdown(ns, endtime, R)
latetime = 60
if (t2-endtime < latetime):
#early-time buildup solution
earlyTei = sc.expi((-R ** 2 / (4 * ns.eta * t2-endtime)).eval())
Tex2 = Tex - (earlyTei * phieff * ns.Jw / (4 * math.pi * ns.K)).eval()
else:
#late-time buildup solution
lateTei = sc.expi((-R**2 * ns.cp * ns.ρ / (4 * ns.λ * t2-endtime)).eval())
Tex2 = Tex - (lateTei * constantjt * ns.Jw / (4 * math.pi * ns.K)).eval()
return Tex2
#Others
def RefineBySDF(topo, radius, sdf, nrefine):
refined_topo = topo
for n in range(nrefine):
elems_to_refine = []
k = 0
bez = refined_topo.sample('bezier',2)
sd = bez.eval(sdf)
sd = sd.reshape( [len(sd)//4, 4] )
for i in range(len(sd)):
if any(sd[i,:] == radius.eval()):
elems_to_refine.append(k)
k = k + 1
refined_topo = refined_topo.refined_by(refined_topo.transforms[np.array(elems_to_refine)])
return refined_topo
#Indirect welldata from internet
def get_welldata(parameter):
welldata = pd.read_excel(r'C:\Users\s141797\OneDrive - TU Eindhoven\Scriptie\nlog_welldata.xlsx') #for an earlier version of Excel use 'xls'
columns = ['TBH', 'TESP', 'PBH', 'PESP', 'Q', 'CORRECTED_TIME']
df = pd.DataFrame(welldata, columns = columns)
return np.array(df.loc[:, parameter]) #np.array(df['PRESSURE']), np.array(df['TEMPERATURE'])
#Postprocessing
nanjoin = lambda array, tri: np.insert(array.take(tri.flat, 0).astype(float),
slice(tri.shape[1], tri.size, tri.shape[1]), np.nan,
axis=0)
def plotdrawdown_1D(ns, bezier, x, p, TT, t1):
""" Exports figures to public.html for the pressure and temperature 1D radial profile along the reservoir
Arguments:
ns (?): Namespace
bezier (?): Parametric curve
x (array): Radial position
p (array): Fluid pressure
T (array): System (Solid + Fluid) temperature
t1 (float): Time of drawdown period
Returns:
pressure1d (png): graph of 1D radial pressure
temperature1d (png): graph of 1D radial temperature
"""
with export.mplfigure('pressure1d.png', dpi=800) as plt:
ax = plt.subplots()
ax.set(xlabel='Distance [m]', ylabel='Pressure [MPa]')
ax.set_ylim([20, 23])
ax.plot(nanjoin(x[:, 0], bezier.tri)[::100], nanjoin(p, bezier.tri)[::100] / 1e6, label="FEM")
ax.plot(x[:, 0][::100],
np.array(panalyticaldrawdown(ns, t1, x[:, 0]))[0][0][0][::100] / 1e6,
label="analytical")
ax.legend(loc="center right")
with export.mplfigure('temperature1d.png', dpi=800) as plt:
ax = plt.subplots()
ax.set(xlabel='Distance [m]', ylabel='Temperature [K]')
ax.plot(nanjoin(x[:, 0], bezier.tri)[0:100000:10], nanjoin(TT, bezier.tri)[0:100000:10], label="FEM")
ax.plot(nanjoin(x[:, 0], bezier.tri)[0:100000:10],
np.array(Tanalyticaldrawdown(ns, t1, nanjoin(x[:, 0], bezier.tri)))[0][0][0][0:100000:10],
label="analytical")
ax.legend(loc="center right")
def plotbuildup_1D(ns, bezier, x, p, TT, endtime, t2):
""" Exports figures to public.html for the pressure and temperature 1D radial profile along the reservoir
Arguments:
ns (?): Namespace
bezier (?): Parametric curve
x (array): Radial position
p (array): Fluid pressure
T (array): System (Solid + Fluid) temperature
endtime (float): Time that drawdown period ended
t2 (float): Time of buildup period
Returns:
pressure1d (png): graph of 1D radial pressure
temperature1d (png): graph of 1D radial temperature
"""
with export.mplfigure('pressure1d.png', dpi=800) as plt:
ax = plt.subplots()
ax.set(xlabel='Distance [m]', ylabel='Pressure [MPa]')
ax.plot(nanjoin(x[:, 0], bezier.tri)[::100], nanjoin(p, bezier.tri)[::100] / 1e6, label="FEM")
ax.plot(x[:, 0][::100],
np.array(panalyticalbuildup(ns, endtime, t2, x[:, 0]))[0][0][0][
::100] / 1e6, label="analytical")
ax.legend(loc="center right")
with export.mplfigure('temperature1d.png', dpi=800) as plt:
ax = plt.subplots()
ax.set(xlabel='Distance [m]', ylabel='Temperature [K]')
# ax.set_ylim([362.85, 363.02])
ax.plot(nanjoin(x[:, 0], bezier.tri)[0:100000:10], nanjoin(TT, bezier.tri)[0:100000:10],
label="FEM")
ax.plot(nanjoin(x[:, 0], bezier.tri)[0:100000:10],
np.array(Tanalyticalbuildup(ns, endtime, t2, nanjoin(x[:, 0], bezier.tri)[0:100000:10]))[0][
0][0],
label="analytical")
ax.legend(loc="center right")
def plotovertime(timeperiod, parraywell, parrayexact, Tarraywell, Tarrayexact, Qarray):
with export.mplfigure('pressuretime.png', dpi=800) as plt:
ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.set(xlabel='Time [s]')
ax1.set_ylabel('Pressure [MPa]', color='b')
ax2.set_ylabel('Volumetric flow rate [m^3/s]', color='k')
ax1.plot(timeperiod, parraywell / 1e6, 'bo', label="FEM")
ax1.plot(timeperiod, parrayexact / 1e6, label="analytical")
# ax1.plot(timeperiod, parrayexp, label="NLOG")
ax1.legend(loc="center right")
ax2.plot(timeperiod, Qarray, 'k')
# with export.mplfigure('pressuretimeerror.png', dpi=800) as plt:
# ax1 = plt.subplots()
# ax2 = ax1.twinx()
# ax1.set(xlabel='Time [s]')
# ax1.set(ylabel=r'$\left(\left|p_{w}-{p}_{w,exact}\right|/\left|p_{w,0}\right|\right)$', yscale="log")
# ax2.set_ylabel('Volumetric flow rate [m^3/s]', color='k')
# ax1.plot(timeperiod, parrayerror / 225e5, 'bo', label=r'$r_{dr} = 1000m$ refined mesh')
# ax1.set_ylim(ymin=0.00005)
# ax1.legend(loc="center right")
# ax2.plot(timeperiod, Qarray, 'k')
with export.mplfigure('temperaturetime.png', dpi=800) as plt:
ax1 = plt.subplots()
ax2 = ax1.twinx()
# ax1.set_ylim([362.9, 363.1])
ax1.set(xlabel='Time [s]')
ax1.set_ylabel('Temperature [K]', color='b')
ax2.set_ylabel('Volumetric flow rate [m^3/s]', color='k')
ax1.plot(timeperiod, Tarraywell, 'ro', label="FEM")
ax1.plot(timeperiod, Tarrayexact, label="analytical")
ax1.legend(loc="center right")
ax2.plot(timeperiod, Qarray, 'k')
| 34.795107
| 144
| 0.569081
|
fe7104aacf148d6e6612a3a11368f393e31c636f
| 1,216
|
py
|
Python
|
examples/notes_server.py
|
MatthewScholefield/flask-resto
|
19630accb55919817712e08536caaa93a3a85a7d
|
[
"MIT"
] | 2
|
2020-04-23T08:06:51.000Z
|
2020-04-23T10:14:13.000Z
|
examples/notes_server.py
|
MatthewScholefield/flask-resto
|
19630accb55919817712e08536caaa93a3a85a7d
|
[
"MIT"
] | null | null | null |
examples/notes_server.py
|
MatthewScholefield/flask-resto
|
19630accb55919817712e08536caaa93a3a85a7d
|
[
"MIT"
] | null | null | null |
from flask import Flask
from uuid import uuid4
from werkzeug.exceptions import NotFound
from flask_resto import Api
app = Flask(__name__)
api = Api(app)
notes = {} # In practice, use a database
class Note:
def create(self):
note = {
'title': api.json['title'],
'description': api.json['description'],
'uuid': str(uuid4())
}
notes[note['uuid']] = note
return note
def delete(self, uuid):
Note.get_note(uuid)
del notes[uuid]
def get(self, uuid):
return Note.get_note(uuid)
def get_all(self):
return notes
def update(self, uuid):
note = Note.get_note(uuid)
note.update(api.json)
return note
@staticmethod
def get_note(uuid):
note = notes.get(uuid)
if not note:
raise NotFound(uuid)
return note
api.resources = {
'/v1': {
'/note': {
'POST': Note.create,
'GET': Note.get_all,
'/<uuid>': {
'GET': Note.get,
'PATCH': Note.update,
'DELETE': Note.delete
}
}
}
}
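# Minimal sketch of the endpoints the nested mapping above should translate into.
# The flattened paths are inferred from the structure of `api.resources`, not taken
# from flask_resto's documentation, so treat them as an assumption.
EXPECTED_ROUTES = {
    ('POST', '/v1/note'): Note.create,
    ('GET', '/v1/note'): Note.get_all,
    ('GET', '/v1/note/<uuid>'): Note.get,
    ('PATCH', '/v1/note/<uuid>'): Note.update,
    ('DELETE', '/v1/note/<uuid>'): Note.delete,
}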
if __name__ == '__main__':
app.run()
| 19.612903
| 51
| 0.514803
|
ef61f63ddcd4a8e05b444ad7ff624e59849414ec
| 10,130
|
py
|
Python
|
qa/rpc-tests/listtransactions.py
|
uniocoin/unionew
|
91951af8a98fb85eefa556d52cff5c1bd52a2e33
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/listtransactions.py
|
uniocoin/unionew
|
91951af8a98fb85eefa556d52cff5c1bd52a2e33
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/listtransactions.py
|
uniocoin/unionew
|
91951af8a98fb85eefa556d52cff5c1bd52a2e33
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(4, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
# rbf is disabled in Unio Core
# self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| 50.148515
| 113
| 0.598421
|
d858a84b22a29607044f0ec49d321ea28353b2e5
| 1,028
|
py
|
Python
|
kubernetes/test/test_v1_controller_revision_list.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | 1
|
2019-02-17T15:28:39.000Z
|
2019-02-17T15:28:39.000Z
|
kubernetes/test/test_v1_controller_revision_list.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_controller_revision_list.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_controller_revision_list import V1ControllerRevisionList
class TestV1ControllerRevisionList(unittest.TestCase):
""" V1ControllerRevisionList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ControllerRevisionList(self):
"""
Test V1ControllerRevisionList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_controller_revision_list.V1ControllerRevisionList()
pass
if __name__ == '__main__':
unittest.main()
| 22.844444
| 105
| 0.727626
|
6b4920efe2917bcdc6238a1210371fcfe6fc91c5
| 34,850
|
py
|
Python
|
neural_tangents/tests/predict_test.py
|
kayhan-batmanghelich/neural-tangents
|
432ed919fca4a6f2c22071399b79bff9b38880c2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
neural_tangents/tests/predict_test.py
|
kayhan-batmanghelich/neural-tangents
|
432ed919fca4a6f2c22071399b79bff9b38880c2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
neural_tangents/tests/predict_test.py
|
kayhan-batmanghelich/neural-tangents
|
432ed919fca4a6f2c22071399b79bff9b38880c2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/predict.py`."""
import math
from jax import test_util as jtu
from jax.api import device_get
from jax.api import grad
from jax.api import jit
from jax.api import vmap
from jax.config import config
from jax.experimental import optimizers
from jax.lib import xla_bridge
import jax.numpy as np
import jax.random as random
from neural_tangents import predict
from neural_tangents import stax
from neural_tangents.utils import batch
from neural_tangents.utils import empirical
from neural_tangents.utils import utils
config.parse_flags_with_absl()
MATRIX_SHAPES = [(3, 3), (4, 4)]
OUTPUT_LOGITS = [1, 2, 3]
GETS = ('ntk', 'nngp', ('ntk', 'nngp'))
RTOL = 0.1
ATOL = 0.1
if not config.read('jax_enable_x64'):
RTOL = 0.2
ATOL = 0.2
FLAT = 'FLAT'
POOLING = 'POOLING'
# TODO(schsam): Add a pooling test when multiple inputs are supported in
# Conv + Pooling.
TRAIN_SHAPES = [(4, 8), (8, 8), (6, 4, 4, 3)]
TEST_SHAPES = [(6, 8), (16, 8), (2, 4, 4, 3)]
NETWORK = [FLAT, FLAT, FLAT, FLAT]
OUTPUT_LOGITS = [1, 2]
CONVOLUTION_CHANNELS = 256
utils.update_test_tolerance()
def _build_network(input_shape, network, out_logits):
if len(input_shape) == 1:
assert network == 'FLAT'
return stax.serial(
stax.Dense(4096, W_std=1.2, b_std=0.05), stax.Erf(),
stax.Dense(out_logits, W_std=1.2, b_std=0.05))
elif len(input_shape) == 3:
if network == 'POOLING':
return stax.serial(
stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
stax.GlobalAvgPool(), stax.Dense(out_logits, W_std=2.0, b_std=0.05))
elif network == 'FLAT':
return stax.serial(
stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
stax.Flatten(), stax.Dense(out_logits, W_std=2.0, b_std=0.05))
else:
raise ValueError('Unexpected network type found: {}'.format(network))
else:
raise ValueError('Expected flat or image test input.')
def _empirical_kernel(key, input_shape, network, out_logits):
init_fn, f, _ = _build_network(input_shape, network, out_logits)
_, params = init_fn(key, (-1,) + input_shape)
_kernel_fn = empirical.empirical_kernel_fn(f)
kernel_fn = lambda x1, x2, get: _kernel_fn(x1, x2, params, get)
return params, f, jit(kernel_fn, static_argnums=(2,))
def _theoretical_kernel(key, input_shape, network, out_logits):
init_fn, f, kernel_fn = _build_network(input_shape, network, out_logits)
_, params = init_fn(key, (-1,) + input_shape)
return params, f, jit(kernel_fn, static_argnums=(2,))
KERNELS = {
'empirical': _empirical_kernel,
'theoretical': _theoretical_kernel,
}
@optimizers.optimizer
def momentum(learning_rate, momentum=0.9):
"""A standard momentum optimizer for testing.
Different from `jax.experimental.optimizers.momentum` (Nesterov).
"""
learning_rate = optimizers.make_schedule(learning_rate)
def init_fn(x0):
v0 = np.zeros_like(x0)
return x0, v0
def update_fn(i, g, state):
x, velocity = state
velocity = momentum * velocity + g
x = x - learning_rate(i) * velocity
return x, velocity
def get_params(state):
x, _ = state
return x
return init_fn, update_fn, get_params
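# Minimal usage sketch of the optimizer triple defined above, mirroring how the tests
# call it: init packs the parameters with a zero velocity buffer, update applies one
# heavy-ball step, get_params unpacks the current parameters. Relies on the module's
# `np` (jax.numpy) import; the step count and gradient are illustrative only.
def _momentum_usage_sketch():
  init_fn, update_fn, get_params = momentum(0.1, 0.9)
  state = init_fn(np.zeros(3))
  grads = np.ones(3)  # stand-in gradient, not a real loss gradient
  for i in range(5):
    state = update_fn(i, grads, state)
  return get_params(state)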
class PredictTest(jtu.JaxTestCase):
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_network={}_logits={}_{}'.format(
train, network, out_logits, name),
'train_shape':
train,
'network':
network,
'out_logits':
out_logits,
'fn_and_kernel':
fn,
'name':
name,
} for train, network in zip(TRAIN_SHAPES, NETWORK)
for out_logits in OUTPUT_LOGITS
for name, fn in KERNELS.items()))
def testMaxLearningRate(self, train_shape, network, out_logits, fn_and_kernel,
name):
key = random.PRNGKey(0)
key, split = random.split(key)
if len(train_shape) == 2:
train_shape = (train_shape[0] * 5, train_shape[1] * 10)
else:
train_shape = (16, 8, 8, 3)
x_train = random.normal(split, train_shape)
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
for lr_factor in [0.5, 3.]:
params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
# Regress to an MSE loss.
loss = lambda params, x: \
0.5 * np.mean((f(params, x) - y_train) ** 2)
grad_loss = jit(grad(loss))
g_dd = ntk(x_train, None, 'ntk')
steps = 20
if name == 'theoretical':
step_size = predict.max_learning_rate(
g_dd, num_outputs=out_logits) * lr_factor
else:
step_size = predict.max_learning_rate(g_dd, num_outputs=-1) * lr_factor
opt_init, opt_update, get_params = optimizers.sgd(step_size)
opt_state = opt_init(params)
def get_loss(opt_state):
return loss(get_params(opt_state), x_train)
init_loss = get_loss(opt_state)
for i in range(steps):
params = get_params(opt_state)
opt_state = opt_update(i, grad_loss(params, x_train), opt_state)
trained_loss = get_loss(opt_state)
loss_ratio = trained_loss / (init_loss + 1e-12)
if lr_factor == 3.:
if not math.isnan(loss_ratio):
self.assertGreater(loss_ratio, 10.)
else:
self.assertLess(loss_ratio, 0.1)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}_{}'.format(
train, test, network, out_logits, name),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
'fn_and_kernel':
fn
} for train, test, network in zip(TRAIN_SHAPES, TEST_SHAPES, NETWORK)
for out_logits in OUTPUT_LOGITS
for name, fn in KERNELS.items()))
def testNTKMSEPrediction(self, train_shape, test_shape, network, out_logits,
fn_and_kernel):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = random.normal(split, train_shape)
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = random.normal(split, test_shape)
params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
# Regress to an MSE loss.
loss = lambda params, x: \
0.5 * np.mean((f(params, x) - y_train) ** 2)
grad_loss = jit(grad(loss))
g_dd = ntk(x_train, None, 'ntk')
g_td = ntk(x_test, x_train, 'ntk')
predictor = predict.gradient_descent_mse(g_dd, y_train, g_td)
predictor_train = predict.gradient_descent_mse(g_dd, y_train)
atol = ATOL
rtol = RTOL
step_size = 0.1
if len(train_shape) > 2:
# Hacky way to up the tolerance just for convolutions.
atol = ATOL * 2
rtol = RTOL * 2
step_size = 0.1
train_time = 100.0
steps = int(train_time / step_size)
opt_init, opt_update, get_params = optimizers.sgd(step_size)
opt_state = opt_init(params)
fx_initial_train = f(params, x_train)
fx_initial_test = f(params, x_test)
fx_pred_train, fx_pred_test = predictor(0.0, fx_initial_train,
fx_initial_test)
fx_pred_train_only = predictor_train(0.0, fx_initial_train)
self.assertAllClose(fx_initial_train, fx_pred_train, True)
self.assertAllClose(fx_initial_train, fx_pred_train_only, True)
self.assertAllClose(fx_initial_test, fx_pred_test, True)
for i in range(steps):
params = get_params(opt_state)
opt_state = opt_update(i, grad_loss(params, x_train), opt_state)
params = get_params(opt_state)
fx_train = f(params, x_train)
fx_test = f(params, x_test)
fx_pred_train, fx_pred_test = predictor(train_time, fx_initial_train,
fx_initial_test)
fx_pred_train_only = predictor_train(train_time, fx_initial_train)
fx_disp_train = np.sqrt(np.mean((fx_train - fx_initial_train)**2))
fx_disp_test = np.sqrt(np.mean((fx_test - fx_initial_test)**2))
fx_error_train = (fx_train - fx_pred_train) / fx_disp_train
fx_error_train_only = (fx_pred_train_only - fx_pred_train) / fx_disp_train
fx_error_test = (fx_test - fx_pred_test) / fx_disp_test
self.assertAllClose(fx_error_train, np.zeros_like(fx_error_train), True,
rtol, atol)
self.assertAllClose(fx_error_train_only, np.zeros_like(fx_error_train_only),
True, rtol, atol)
self.assertAllClose(fx_error_test, np.zeros_like(fx_error_test), True, rtol,
atol)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}_{}'.format(
train, test, network, out_logits, name),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
'fn_and_kernel':
fn
} for train, test, network in zip(TRAIN_SHAPES, TEST_SHAPES, NETWORK)
for out_logits in OUTPUT_LOGITS
for name, fn in KERNELS.items()))
def testNTKGDPrediction(self, train_shape, test_shape, network, out_logits,
fn_and_kernel):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = random.normal(split, train_shape)
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = random.normal(split, test_shape)
params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
# Regress to an MSE loss.
loss = lambda y, y_hat: 0.5 * np.mean((y - y_hat)**2)
grad_loss = jit(grad(lambda params, x: loss(f(params, x), y_train)))
g_dd = ntk(x_train, None, 'ntk')
g_td = ntk(x_test, x_train, 'ntk')
predictor = predict.gradient_descent(g_dd, y_train, loss, g_td)
atol = ATOL
rtol = RTOL
step_size = 0.5
if len(train_shape) > 2:
# Hacky way to up the tolerance just for convolutions.
atol = ATOL * 2
rtol = RTOL * 2
step_size = 0.1
train_time = 100.0
steps = int(train_time / step_size)
opt_init, opt_update, get_params = optimizers.sgd(step_size)
opt_state = opt_init(params)
fx_initial_train = f(params, x_train)
fx_initial_test = f(params, x_test)
fx_pred_train, fx_pred_test = predictor(0.0, fx_initial_train,
fx_initial_test)
self.assertAllClose(fx_initial_train, fx_pred_train, True)
self.assertAllClose(fx_initial_test, fx_pred_test, True)
for i in range(steps):
params = get_params(opt_state)
opt_state = opt_update(i, grad_loss(params, x_train), opt_state)
params = get_params(opt_state)
fx_train = f(params, x_train)
fx_test = f(params, x_test)
fx_pred_train, fx_pred_test = predictor(train_time, fx_initial_train,
fx_initial_test)
fx_disp_train = np.sqrt(np.mean((fx_train - fx_initial_train)**2))
fx_disp_test = np.sqrt(np.mean((fx_test - fx_initial_test)**2))
fx_error_train = (fx_train - fx_pred_train) / fx_disp_train
fx_error_test = (fx_test - fx_pred_test) / fx_disp_test
self.assertAllClose(fx_error_train, np.zeros_like(fx_error_train), True,
rtol, atol)
self.assertAllClose(fx_error_test, np.zeros_like(fx_error_test), True, rtol,
atol)
# TODO(schsam): Get this test passing with theoretical conv.
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}_{}'.format(
train, test, network, out_logits, name),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
'fn_and_kernel':
fn
} for train, test, network in zip(TRAIN_SHAPES, TEST_SHAPES, NETWORK)
for out_logits in OUTPUT_LOGITS
for name, fn in KERNELS.items()
if len(train) == 2))
def testNTKMomentumPrediction(self, train_shape, test_shape, network,
out_logits, fn_and_kernel):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = random.normal(split, train_shape)
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = random.normal(split, test_shape)
params, f, ntk = fn_and_kernel(key, train_shape[1:], network, out_logits)
# Regress to an MSE loss.
loss = lambda y, y_hat: 0.5 * np.mean((y - y_hat)**2)
grad_loss = jit(grad(lambda params, x: loss(f(params, x), y_train)))
g_dd = ntk(x_train, None, 'ntk')
g_td = ntk(x_test, x_train, 'ntk')
atol = ATOL
rtol = RTOL
step_size = 0.5
if len(train_shape) > 2:
# Hacky way to up the tolerance just for convolutions.
atol = ATOL * 2
rtol = RTOL * 2
step_size = 0.1
train_time = 100.0
steps = int(train_time / np.sqrt(step_size))
init, predictor, get = predict.momentum(g_dd, y_train, loss, step_size,
g_td)
opt_init, opt_update, get_params = momentum(step_size, 0.9)
opt_state = opt_init(params)
fx_initial_train = f(params, x_train)
fx_initial_test = f(params, x_test)
lin_state = init(fx_initial_train, fx_initial_test)
fx_pred_train, fx_pred_test = get(lin_state)
self.assertAllClose(fx_initial_train, fx_pred_train, True)
self.assertAllClose(fx_initial_test, fx_pred_test, True)
for i in range(steps):
params = get_params(opt_state)
opt_state = opt_update(i, grad_loss(params, x_train), opt_state)
params = get_params(opt_state)
fx_train = f(params, x_train)
fx_test = f(params, x_test)
lin_state = predictor(lin_state, train_time)
fx_pred_train, fx_pred_test = get(lin_state)
fx_disp_train = np.sqrt(np.mean((fx_train - fx_initial_train)**2))
fx_disp_test = np.sqrt(np.mean((fx_test - fx_initial_test)**2))
fx_error_train = (fx_train - fx_pred_train) / fx_disp_train
fx_error_test = (fx_test - fx_pred_test) / fx_disp_test
self.assertAllClose(fx_error_train, np.zeros_like(fx_error_train), True,
rtol, atol)
self.assertAllClose(fx_error_test, np.zeros_like(fx_error_test), True, rtol,
atol)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testNTKMeanCovPrediction(self, train_shape, test_shape, network,
out_logits):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, kernel_fn = _build_network(train_shape[1:], network, out_logits)
mean_pred, cov_pred = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test,
'ntk',
diag_reg=0.,
compute_cov=True)
if xla_bridge.get_backend().platform == 'tpu':
eigh = np.onp.linalg.eigh
else:
eigh = np.linalg.eigh
self.assertEqual(cov_pred.shape[0], x_test.shape[0])
min_eigh = np.min(eigh(cov_pred)[0])
self.assertGreater(min_eigh + 1e-10, 0.)
def mc_sampling(count=10):
key = random.PRNGKey(100)
init_fn, f, _ = _build_network(train_shape[1:], network, out_logits)
_kernel_fn = empirical.empirical_kernel_fn(f)
kernel_fn = jit(lambda x1, x2, params: _kernel_fn(x1, x2, params, 'ntk'))
collect_test_predict = []
for _ in range(count):
key, split = random.split(key)
_, params = init_fn(split, train_shape)
g_dd = kernel_fn(x_train, None, params)
g_td = kernel_fn(x_test, x_train, params)
predictor = predict.gradient_descent_mse(g_dd, y_train, g_td)
fx_initial_train = f(params, x_train)
fx_initial_test = f(params, x_test)
_, fx_pred_test = predictor(1.0e8, fx_initial_train, fx_initial_test)
collect_test_predict.append(fx_pred_test)
collect_test_predict = np.array(collect_test_predict)
mean_emp = np.mean(collect_test_predict, axis=0)
mean_subtracted = collect_test_predict - mean_emp
cov_emp = np.einsum(
'ijk,ilk->jl', mean_subtracted, mean_subtracted, optimize=True) / (
mean_subtracted.shape[0] * mean_subtracted.shape[-1])
return mean_emp, cov_emp
atol = ATOL
rtol = RTOL
mean_emp, cov_emp = mc_sampling(100)
self.assertAllClose(mean_pred, mean_emp, True, rtol, atol)
self.assertAllClose(cov_pred, cov_emp, True, rtol, atol)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testGPInferenceGet(self, train_shape, test_shape, network, out_logits):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, kernel_fn = _build_network(train_shape[1:], network, out_logits)
out = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test,
'ntk',
diag_reg=0.,
compute_cov=True)
assert isinstance(out, predict.Gaussian)
out = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test,
'nngp',
diag_reg=0.,
compute_cov=True)
assert isinstance(out, predict.Gaussian)
out = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test, ('ntk',),
diag_reg=0.,
compute_cov=True)
assert len(out) == 1 and isinstance(out[0], predict.Gaussian)
out = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test, ('ntk', 'nngp'),
diag_reg=0.,
compute_cov=True)
assert (len(out) == 2 and isinstance(out[0], predict.Gaussian) and
isinstance(out[1], predict.Gaussian))
out2 = predict.gp_inference(
kernel_fn,
x_train,
y_train,
x_test, ('nngp', 'ntk'),
diag_reg=0.,
compute_cov=True)
self.assertAllClose(out[0], out2[1], True)
self.assertAllClose(out[1], out2[0], True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}_get={}'.format(
train, test, network, out_logits, get),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
'get':
get,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS for get in GETS))
def testInfiniteTimeAgreement(self, train_shape, test_shape, network,
out_logits, get):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, kernel_fn = _build_network(train_shape[1:], network, out_logits)
reg = 1e-7
prediction = predict.gradient_descent_mse_gp(
kernel_fn,
x_train,
y_train,
x_test,
get,
diag_reg=reg,
compute_cov=True)
finite_prediction = prediction(np.inf)
finite_prediction_none = prediction(None)
gp_inference = predict.gp_inference(kernel_fn, x_train, y_train, x_test,
get, reg, True)
self.assertAllClose(finite_prediction_none, finite_prediction, True)
self.assertAllClose(finite_prediction_none, gp_inference, True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testZeroTimeAgreement(self, train_shape, test_shape, network, out_logits):
"""Test that the NTK and NNGP agree at t=0."""
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, ker_fun = _build_network(train_shape[1:], network, out_logits)
reg = 1e-7
prediction = predict.gradient_descent_mse_gp(
ker_fun,
x_train,
y_train,
x_test,
diag_reg=reg,
get=('NTK', 'NNGP'),
compute_cov=True)
zero_prediction = prediction(0.0)
self.assertAllClose(zero_prediction.ntk, zero_prediction.nngp, True)
reference = (np.zeros(
(test_shape[0], out_logits)), ker_fun(x_test, x_test, get='nngp'))
self.assertAllClose((reference,) * 2, zero_prediction, True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testNTK_NTKNNGPAgreement(self, train_shape, test_shape, network,
out_logits):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, ker_fun = _build_network(train_shape[1:], network, out_logits)
reg = 1e-7
prediction = predict.gradient_descent_mse_gp(
ker_fun,
x_train,
y_train,
x_test,
diag_reg=reg,
get='NTK',
compute_cov=True)
ts = np.logspace(-2, 8, 10)
ntk_predictions = [prediction(t).mean for t in ts]
# Create a hacked kernel function that always returns the ntk kernel
def always_ntk(x1, x2, get=('nngp', 'ntk')):
out = ker_fun(x1, x2, get=('nngp', 'ntk'))
if get == 'nngp' or get == 'ntk':
return out.ntk
else:
return out._replace(nngp=out.ntk)
ntk_nngp_prediction = predict.gradient_descent_mse_gp(
always_ntk,
x_train,
y_train,
x_test,
diag_reg=reg,
get='NNGP',
compute_cov=True)
ntk_nngp_predictions = [ntk_nngp_prediction(t).mean for t in ts]
# Test if you use the nngp equations with the ntk, you get the same mean
self.assertAllClose(ntk_predictions, ntk_nngp_predictions, True)
# Next test that if you go through the NTK code path, but with only
# the NNGP kernel, we recreate the NNGP dynamics.
reg = 1e-7
nngp_prediction = predict.gradient_descent_mse_gp(
ker_fun,
x_train,
y_train,
x_test,
diag_reg=reg,
get='NNGP',
compute_cov=True)
# Create a hacked kernel function that always returns the nngp kernel
def always_nngp(x1, x2, get=('nngp', 'ntk')):
out = ker_fun(x1, x2, get=('nngp', 'ntk'))
if get == 'nngp' or get == 'ntk':
return out.nngp
else:
return out._replace(ntk=out.nngp)
nngp_ntk_prediction = predict.gradient_descent_mse_gp(
always_nngp,
x_train,
y_train,
x_test,
diag_reg=reg,
get='NTK',
compute_cov=True)
nngp_cov_predictions = [nngp_prediction(t).covariance for t in ts]
nngp_ntk_cov_predictions = [nngp_ntk_prediction(t).covariance for t in ts]
# Test if you use the ntk equations with the nngp, you get the same cov
# Although, due to accumulation of numerical errors, only roughly.
self.assertAllClose(nngp_cov_predictions, nngp_ntk_cov_predictions, True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testNTKPredCovPosDef(self, train_shape, test_shape, network, out_logits):
key = random.PRNGKey(0)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
_, _, ker_fun = _build_network(train_shape[1:], network, out_logits)
reg = 1e-7
ntk_predictions = predict.gradient_descent_mse_gp(
ker_fun,
x_train,
y_train,
x_test,
diag_reg=reg,
get='ntk',
compute_cov=True)
ts = np.logspace(-2, 8, 10)
ntk_cov_predictions = [ntk_predictions(t).covariance for t in ts]
if xla_bridge.get_backend().platform == 'tpu':
eigh = np.onp.linalg.eigh
else:
eigh = np.linalg.eigh
check_symmetric = np.array(
[np.max(np.abs(cov - cov.T)) for cov in ntk_cov_predictions])
check_pos_evals = np.min(
np.array([eigh(cov)[0] + 1e-10 for cov in ntk_cov_predictions]))
self.assertAllClose(check_symmetric, np.zeros_like(check_symmetric), True)
self.assertGreater(check_pos_evals, 0., True)
@jtu.parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name':
'_train={}_test={}_network={}_logits={}'.format(
train, test, network, out_logits),
'train_shape':
train,
'test_shape':
test,
'network':
network,
'out_logits':
out_logits,
}
for train, test, network in zip(
TRAIN_SHAPES[:-1], TEST_SHAPES[:-1], NETWORK[:-1])
for out_logits in OUTPUT_LOGITS))
def testTrainedEnsemblePredCov(self, train_shape, test_shape, network,
out_logits):
if xla_bridge.get_backend().platform == 'gpu' and config.read(
'jax_enable_x64'):
raise jtu.SkipTest('Not running GPU x64 to save time.')
training_steps = 5000
learning_rate = 1.0
ensemble_size = 50
init_fn, apply_fn, ker_fn = stax.serial(
stax.Dense(1024, W_std=1.2, b_std=0.05), stax.Erf(),
stax.Dense(out_logits, W_std=1.2, b_std=0.05))
opt_init, opt_update, get_params = optimizers.sgd(learning_rate)
opt_update = jit(opt_update)
key = random.PRNGKey(0)
key, = random.split(key, 1)
key, split = random.split(key)
x_train = np.cos(random.normal(split, train_shape))
key, split = random.split(key)
y_train = np.array(
random.bernoulli(split, shape=(train_shape[0], out_logits)), np.float32)
train = (x_train, y_train)
key, split = random.split(key)
x_test = np.cos(random.normal(split, test_shape))
ensemble_key = random.split(key, ensemble_size)
loss = jit(lambda params, x, y: 0.5 * np.mean((apply_fn(params, x) - y)**2))
grad_loss = jit(lambda state, x, y: grad(loss)(get_params(state), x, y))
def train_network(key):
_, params = init_fn(key, (-1,) + train_shape[1:])
opt_state = opt_init(params)
for i in range(training_steps):
opt_state = opt_update(i, grad_loss(opt_state, *train), opt_state)
return get_params(opt_state)
params = vmap(train_network)(ensemble_key)
ensemble_fx = vmap(apply_fn, (0, None))(params, x_test)
ensemble_loss = vmap(loss, (0, None, None))(params, x_train, y_train)
ensemble_loss = np.mean(ensemble_loss)
self.assertLess(ensemble_loss, 1e-5, True)
mean_emp = np.mean(ensemble_fx, axis=0)
mean_subtracted = ensemble_fx - mean_emp
cov_emp = np.einsum(
'ijk,ilk->jl', mean_subtracted, mean_subtracted, optimize=True) / (
mean_subtracted.shape[0] * mean_subtracted.shape[-1])
reg = 1e-7
ntk_predictions = predict.gp_inference(
ker_fn, x_train, y_train, x_test, 'ntk', reg, compute_cov=True)
self.assertAllClose(mean_emp, ntk_predictions.mean, True, RTOL, ATOL)
self.assertAllClose(cov_emp, ntk_predictions.covariance, True, RTOL, ATOL)
def testPredictOnCPU(self):
x_train = random.normal(random.PRNGKey(1), (10, 4, 5, 3))
x_test = random.normal(random.PRNGKey(1), (8, 4, 5, 3))
y_train = random.uniform(random.PRNGKey(1), (10, 7))
_, _, kernel_fn = stax.serial(
stax.Conv(1, (3, 3)), stax.Relu(), stax.Flatten(), stax.Dense(1))
for store_on_device in [False, True]:
for device_count in [0, 1]:
for get in ['ntk', 'nngp', ('nngp', 'ntk'), ('ntk', 'nngp')]:
with self.subTest(
store_on_device=store_on_device,
device_count=device_count,
get=get):
kernel_fn_batched = batch.batch(kernel_fn, 2, device_count,
store_on_device)
predictor = predict.gradient_descent_mse_gp(kernel_fn_batched,
x_train, y_train,
x_test, get, 0., True)
gp_inference = predict.gp_inference(kernel_fn_batched, x_train,
y_train, x_test, get, 0., True)
self.assertAllClose(predictor(None), predictor(np.inf), True)
self.assertAllClose(predictor(None), gp_inference, True)
def testIsOnCPU(self):
for dtype in [np.float32, np.float64]:
with self.subTest(dtype=dtype):
def x():
return random.normal(random.PRNGKey(1), (2, 3), dtype)
def x_cpu():
return device_get(random.normal(random.PRNGKey(1), (2, 3), dtype))
x_jit = jit(x)
x_cpu_jit = jit(x_cpu)
x_cpu_jit_cpu = jit(x_cpu, backend='cpu')
self.assertTrue(predict._is_on_cpu(x_cpu()))
self.assertTrue(predict._is_on_cpu(x_cpu_jit()))
self.assertTrue(predict._is_on_cpu(x_cpu_jit_cpu()))
if xla_bridge.get_backend().platform == 'cpu':
self.assertTrue(predict._is_on_cpu(x()))
self.assertTrue(predict._is_on_cpu(x_jit()))
else:
self.assertFalse(predict._is_on_cpu(x()))
self.assertFalse(predict._is_on_cpu(x_jit()))
if __name__ == '__main__':
jtu.absltest.main()
| 33.253817
| 80
| 0.609527
|
871e7ccd4f9e76ec93577b82ae01f51ed8069921
| 5,411
|
py
|
Python
|
crawler/post_process_jsonv2.py
|
saurabhvyas/dataset_creator
|
770477aca0611f6e1776ff6059c454d34693e18d
|
[
"MIT"
] | 1
|
2019-07-01T15:26:57.000Z
|
2019-07-01T15:26:57.000Z
|
crawler/post_process_jsonv2.py
|
saurabhvyas/dataset_creator
|
770477aca0611f6e1776ff6059c454d34693e18d
|
[
"MIT"
] | 1
|
2020-01-11T19:24:43.000Z
|
2020-01-11T19:24:43.000Z
|
crawler/post_process_jsonv2.py
|
saurabhvyas/dataset_creator
|
770477aca0611f6e1776ff6059c454d34693e18d
|
[
"MIT"
] | null | null | null |
import json
'''import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-wav_file")
parser.add_argument("-txt_file")
args = parser.parse_args()
# this code section simply takes wave file ( ) and txt file (txt_file) and gentle json(json_file)
and returns starting and ending times
'''
def filter_json(json_dict):
last_word_index=-1
for id,word in enumerate(json_dict["words"]):
if "end" in word:
last_word_index=id
return json_dict["words"][0:last_word_index]
def get_sentence_boundary(json_file,txt_file):
with open(json_file, 'r') as f:
array = json.load(f)
# print(array["words"])
array=filter_json(array)
text = ""
sentence_ended = False
sentence_started = False
starting_time=0
ending_time=0
sentences=[]
current_longest_sentence_length=0
current_longest_sentence_id=0
for word in array:
if "end" in word:
sentence_ended=False
ending_time=word["end"]
if sentence_started == False:
sentence_started = True
starting_time=word["start"]
#if sentence_ended == False:
text = text + " " + word["alignedWord"]
#elif sentence_ended == True:
#text = word["alignedWord"]
else :
if len(text) != 0:
if len(text) > current_longest_sentence_length:
current_longest_sentence_length=len(text)
current_longest_sentence_id=len(sentences)
sentences.append([starting_time,ending_time,len(text),text])
text=""
sentence_started=False
starting_time=0
ending_time=0
#sentence_ended = True
#print(" text : " + text )
#print(" starting time : " + str(starting_time) )
#print(" ending time : " + str(ending_time) )
# overwrite text file with longest sentence and return its starting time and ending time
data=0
with open(txt_file, 'w+') as out:
#data = out.read()
#if len(data) == 0:
#print(text)
out.write(sentences[current_longest_sentence_id][3])
#if text=="" or len(data) == 0:
if text=="":
return [0,0]
else:
return [sentences[current_longest_sentence_id][0], sentences[current_longest_sentence_id][1]]
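# Minimal sketch of the alignment JSON that get_sentence_boundary consumes, inferred
# from the keys used above ("words", "alignedWord", "start", "end"); the values are
# made-up placeholders, not real aligner output.
EXAMPLE_ALIGNMENT = {
    "words": [
        {"alignedWord": "hello", "start": 0.32, "end": 0.61},
        {"word": "mumbled"},  # no "end" key -> ends the current sentence in the loop above
        {"alignedWord": "world", "start": 0.70, "end": 1.05},
    ]
}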
'''
start of another code segment
will document this later
'''
def istxtfileempty(path):
import os
return os.stat(path).st_size == 0
def getaudiolen(path):
import soundfile as sf
f = sf.SoundFile(path)
#print('samples = {}'.format(len(f)))
#print('sample rate = {}'.format(f.samplerate))
return len(f) / f.samplerate
def trim_audio(wavfilename,starting_time,ending_time):
import subprocess
subprocess.call(["ffmpeg", "-i",wavfilename,"-ss",starting_time,"-to",ending_time,"-y" , "-c" , "copy" , wavfilename ])
def sendgentlerequest(wavfilepath,txtfilepath,outputjsonpath):
'''
    This function calls the gentle forced-aligner Docker container, passing it the
    wav file and the transcript txt file. It expects an alignment JSON in response,
    which it stores at the given output json path.
'''
import requests
#payload = {'audio=@': wavfilepath, 'transcript=<': txt_file_path}
#r = requests.post('http://localhost:8765/transcriptions?async=false',data=payload)
#import requests
with open(txtfilepath, 'r') as file:
txt_data = file.read().replace('\n', '')
params = (
('async', 'false'),
)
files = {
'audio': ( wavfilepath, open(wavfilepath, 'rb')),
'transcript': (None, txt_data),
}
r = requests.post('http://localhost:8765/transcriptions', params=params, files=files)
import json
print(r.json())
with open(outputjsonpath, 'w', encoding='utf-8') as f:
json.dump(r.json(), f, ensure_ascii=False, indent=4)
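# Minimal sketch of one end-to-end call using the helpers defined above. It assumes a
# gentle container is listening on localhost:8765 (as hard-coded above); the file
# names are placeholders.
def run_single_alignment():
    sendgentlerequest("clip.wav", "clip.txt", "clip.json")
    return get_sentence_boundary("clip.json", "clip.txt")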
# iterate over all subdirectories in wav folder
import os
rootdir = 'filter_dir/wav/'
import glob,os
from pathlib import Path
for folder in glob.iglob('./filter_dir/wav/*'):
print(folder)
base_folder_path=Path(folder).name
for file in glob.iglob(folder + "/*.wav"):
#print(file)
base_file_name=Path(file).name
# get audio length
audio_len=getaudiolen(file)
txt_file_path="./filter_dir/txt/" + base_folder_path + "/" + base_file_name.replace("wav","txt")
output_json_path="./filter_dir/txt/" + base_folder_path + "/" + base_file_name.replace("wav","json")
print(txt_file_path)
#print(audio_len)
if audio_len!=0 and istxtfileempty(txt_file_path) ==False:
print("calling gentle")
# call gentle
sendgentlerequest(file,txt_file_path,output_json_path)
# get sentence boundaries
boundaries=get_sentence_boundary(output_json_path,txt_file_path)
starting_time=boundaries[0]
ending_time=boundaries[1]
#trim audio
print("starting time : ",starting_time)
print("ending time : ",ending_time)
trim_audio(file,str(starting_time),str(ending_time))
| 29.091398
| 123
| 0.595823
|
73113539329c5493141db243b85254062f7b8f88
| 2,075
|
py
|
Python
|
tensorflow/python/keras/_impl/keras/__init__.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 24
|
2018-02-01T15:49:22.000Z
|
2021-01-11T16:31:18.000Z
|
tensorflow/python/keras/_impl/keras/__init__.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 2
|
2018-09-09T07:29:07.000Z
|
2019-03-11T07:14:45.000Z
|
tensorflow/python/keras/_impl/keras/__init__.py
|
M155K4R4/Tensorflow
|
e5e03ef3148303b3dfed89a1492dedf92b45be25
|
[
"Apache-2.0"
] | 4
|
2018-10-29T18:43:22.000Z
|
2020-09-28T07:19:52.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Keras API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import applications
from tensorflow.python.keras._impl.keras import backend
from tensorflow.python.keras._impl.keras import callbacks
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import datasets
from tensorflow.python.keras._impl.keras import engine
from tensorflow.python.keras._impl.keras import estimator
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import layers
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics
from tensorflow.python.keras._impl.keras import models
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras import preprocessing
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras import utils
from tensorflow.python.keras._impl.keras import wrappers
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.models import Sequential
__version__ = '2.1.3-tf'
| 47.159091
| 80
| 0.793735
|
d58a0fcc0ed66874ac9c2a2b63d2af320ee9ec40
| 5,942
|
py
|
Python
|
Speedo/plugins/yt.py
|
Hobby-Dev-0/Speedo
|
bf0c935aa63e782835e15ba2ffab8bbe52771201
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/plugins/yt.py
|
Hobby-Dev-0/Speedo
|
bf0c935aa63e782835e15ba2ffab8bbe52771201
|
[
"BSD-3-Clause"
] | null | null | null |
Speedo/plugins/yt.py
|
Hobby-Dev-0/Speedo
|
bf0c935aa63e782835e15ba2ffab8bbe52771201
|
[
"BSD-3-Clause"
] | 3
|
2021-10-12T08:17:01.000Z
|
2021-12-21T01:17:54.000Z
|
import re
import random
import json
from pathlib import Path
import asyncio
import math
import os
import time
from telethon.tl.types import DocumentAttributeAudio
from youtube_search import YoutubeSearch
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
ContentTooShortError,
DownloadError,
ExtractorError,
GeoRestrictedError,
MaxDownloadsReached,
PostProcessingError,
UnavailableVideoError,
XAttrMetadataError,
)
from . import *
@bot.on(admin_cmd(pattern="yt(a|v) (.*)"))
@bot.on(sudo_cmd(pattern="yt(a|v) (.*)", allow_sudo=True))
async def download_video(v_url):
if v_url.fwd_from:
return
url = v_url.pattern_match.group(2)
type = v_url.pattern_match.group(1).lower()
event = await eor(v_url, "`Preparing to download...`")
if type == "a":
opts = {
"format": "bestaudio",
"addmetadata": True,
"key": "FFmpegMetadata",
"writethumbnail": True,
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "480",
}
],
"outtmpl": "%(id)s.mp3",
"quiet": True,
"logtostderr": False,
}
video = False
song = True
elif type == "v":
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
],
"outtmpl": "%(id)s.mp4",
"logtostderr": False,
"quiet": True,
}
song = False
video = True
try:
await event.edit("**Fetching YT link...**")
with YoutubeDL(opts) as ytdl:
ytdl_data = ytdl.extract_info(url)
except DownloadError as DE:
await edit_or_reply(v_url, f"`{str(DE)}`")
return
except ContentTooShortError:
await eod(event, "`The download content was too short.`")
return
except GeoRestrictedError:
await eod(event,
"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`"
)
return
except MaxDownloadsReached:
await eod(event, "`Max-downloads limit has been reached.`")
return
except PostProcessingError:
await eod(event, "`There was an error during post processing.`")
return
except UnavailableVideoError:
await eod(event, "`Media is not available in the requested format.`")
return
except XAttrMetadataError as XAME:
await edit_or_reply(v_url, f"`{XAME.code}: {XAME.msg}\n{XAME.reason}`")
return
except ExtractorError:
await eod(event, "`There was an error during info extraction.`")
return
except Exception as e:
await eod(event, f"{str(type(e)): {str(e)}}", 10)
return
c_time = time.time()
if song:
await eor(event,
f"📤 `Preparing to upload audio:`\
\n\n**{ytdl_data['title']}**\
\nby *{ytdl_data['uploader']}*"
)
await v_url.client.send_file(
v_url.chat_id,
f"{ytdl_data['id']}.mp3",
supports_streaming=True,
attributes=[
DocumentAttributeAudio(
duration=int(ytdl_data["duration"]),
title=str(ytdl_data["title"]),
                    performer=str(ytdl_data["uploader"]),
)
],
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(
d, t, v_url, c_time, "Uploading..", f"{ytdl_data['title']}.mp3"
)
),
)
os.remove(f"{ytdl_data['id']}.mp3")
await v_url.delete()
elif video:
await eor(event,
f"`Preparing to upload video:`\
\n\n**{ytdl_data['title']}**\
\nby *{ytdl_data['uploader']}*"
)
await v_url.client.send_file(
v_url.chat_id,
f"{ytdl_data['id']}.mp4",
supports_streaming=True,
caption=ytdl_data["title"],
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(
d, t, v_url, c_time, "Uploading..", f"{ytdl_data['title']}.mp4"
)
),
)
os.remove(f"{ytdl_data['id']}.mp4")
await v_url.delete()
@bot.on(admin_cmd(pattern="ytlink ?(.*)"))
@bot.on(sudo_cmd(pattern="ytlink ?(.*)", allow_sudo=True))
async def hmm(ytwala):
query = ytwala.pattern_match.group(1)
if not query:
        return await eod(ytwala, "`Enter query to search on yt`")
event = await eor(ytwala, "`Processing...`")
try:
results = json.loads(YoutubeSearch(query, max_results=7).to_json())
except KeyError:
return await eod(event, "Unable to find relevant search queries...")
output = f"**Search Query:**\n`{query}`\n\n**Results:**\n\n"
for i in results["videos"]:
output += (f"--> `{i['title']}`\nhttps://www.youtube.com{i['url_suffix']}\n\n")
await event.edit(output, link_preview=False)
CmdHelp("youtube").add_command(
"yta", "<yt link>", "Extracts the audio from given youtube link and uploads it to telegram"
).add_command(
"ytv", "<yt link>", "Extracts the video from given youtube link and uploads it to telegram"
).add_command(
"ytlink", "<search keyword>", "Extracts 7 links from youtube based on the given search query"
).add_info(
"Youthoob ki duniya."
).add_warning(
"✅ Harmless Module."
).add()
| 31.775401
| 121
| 0.561427
|
f719fd37c128d9a9db10d9a47902af2a5eb5d61e
| 3,283
|
py
|
Python
|
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
lektor/markdown/__init__.py
|
uk0/lektor
|
21bdf99aa1183b4398043f87ba8ed137fad529ce
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from typing import Any
from typing import Dict
from typing import Hashable
from typing import Type
from typing import TYPE_CHECKING
from weakref import ref as weakref
from deprecated import deprecated
from markupsafe import Markup
from lektor.markdown.controller import ControllerCache
from lektor.markdown.controller import FieldOptions
from lektor.markdown.controller import MarkdownController
from lektor.markdown.controller import Meta
from lektor.markdown.controller import RenderResult
from lektor.sourceobj import SourceObject
if sys.version_info >= (3, 8):
from importlib.metadata import version
else:
from importlib_metadata import version
if TYPE_CHECKING: # pragma: no cover
from lektor.environment import Environment
controller_class: Type[MarkdownController]
MISTUNE_VERSION = version("mistune")
if MISTUNE_VERSION.startswith("0."):
from lektor.markdown.mistune0 import MarkdownController0 as controller_class
elif MISTUNE_VERSION.startswith("2."):
from lektor.markdown.mistune2 import MarkdownController2 as controller_class
else: # pragma: no cover
raise ImportError("Unsupported version of mistune")
get_controller = ControllerCache(controller_class)
@deprecated
def make_markdown(env: "Environment") -> Any: # (Environment) -> mistune.Markdown
return get_controller(env).make_parser()
@deprecated
def markdown_to_html(
text: str, record: SourceObject, field_options: FieldOptions
) -> RenderResult:
return get_controller().render(text, record, field_options)
class Markdown:
def __init__(
self, source: str, record: SourceObject, field_options: FieldOptions
) -> None:
self.source = source
self.__record = weakref(record)
self.__field_options = field_options
self.__cache: Dict[Hashable, RenderResult] = {}
def __bool__(self) -> bool:
return bool(self.source)
__nonzero__ = __bool__
@property
def record(self) -> SourceObject:
record = self.__record()
if record is None:
raise RuntimeError("Record has gone away")
return record
def __render(self) -> RenderResult:
# When the markdown instance is attached to a cached object we
# can end up in the situation where, e.g., the base_url has
# changed from the time we were put into the cache to the time
# where we got referenced by something elsewhere. Since this
# affects the processing of relative links, in that case we
# need to re-process our markdown.
controller = get_controller()
key = controller.get_cache_key()
result = self.__cache.get(key) if key is not None else None
if result is None:
result = controller.render(self.source, self.record, self.__field_options)
if key is not None:
self.__cache[key] = result
return result
@property
def meta(self) -> Meta:
return self.__render().meta
@property
def html(self) -> Markup:
return Markup(self.__render().html)
def __getitem__(self, name: str) -> Any:
return self.meta[name]
def __str__(self) -> str:
return self.__render().html
def __html__(self) -> Markup:
return self.html
| 30.682243
| 86
| 0.709108
|
478516ee1b7cfb538366afadda705d9f9f06b79a
| 214
|
py
|
Python
|
freight_forwarder/commercial_invoice/__init__.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
freight_forwarder/commercial_invoice/__init__.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
freight_forwarder/commercial_invoice/__init__.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8; -*-
# flake8: noqa
from __future__ import unicode_literals, absolute_import
from .commercial_invoice import CommercialInvoice
from .service import Service
__author__ = 'alexb'
| 23.777778
| 56
| 0.724299
|
b63fdcbdcdeccf24d586e3ee71e004cf431f726e
| 53,839
|
py
|
Python
|
tests/test_edgeql_volatility.py
|
sobolevn/edgedb
|
b346b6496a203d8313d4c9adf68b0478f85122ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_edgeql_volatility.py
|
sobolevn/edgedb
|
b346b6496a203d8313d4c9adf68b0478f85122ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_edgeql_volatility.py
|
sobolevn/edgedb
|
b346b6496a203d8313d4c9adf68b0478f85122ed
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os.path
import edgedb
from edb.testbase import server as tb
from edb.testbase import serutils
from edb.tools import test
class TestEdgeQLVolatility(tb.QueryTestCase):
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'volatility.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'volatility_setup.edgeql')
def _check_crossproduct(self, res):
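        # Asserts that the rows form the full cross product of the distinct
        # first-column values, i.e. both tuple elements range independently
        # over the same underlying set.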
ns = set()
pairs = set()
for row in res:
ns.add(row[0])
pairs.add((row[0], row[1]))
self.assertEqual(
pairs,
{(n1, n2) for n1 in ns for n2 in ns},
)
def test_loop(self, n=None, *, one=False):
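        # Yields n query helpers (default 2), alternating between the JSON
        # codec and the native codec so each caller exercises both
        # serialization paths; with one=True the *_one variants are used.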
async def json_query(*args, **kwargs):
q = self.con.query_one_json if one else self.con.query_json
res = await q(*args, **kwargs)
return json.loads(res)
async def native_query(*args, **kwargs):
q = self.con.query_one if one else self.con.query
res = await q(*args, **kwargs)
return serutils.serialize(res)
qs = [json_query, native_query]
if n is None:
n = len(qs)
for i in range(n):
yield qs[i % len(qs)]
async def test_edgeql_volatility_function_01(self):
result = await self.con.query(
r"""
SELECT Obj {
# immutable function should only be called once,
# generating the same value for all Objs
x := vol_immutable()
};
"""
)
self.assertEqual(
len(set(res.x for res in result)), 1,
'more than one value for the same vol_immutable() call'
)
async def test_edgeql_volatility_function_02(self):
result = await self.con.query(
r"""
SELECT Obj {
# stable function should only be called once,
# generating the same value for all Objs
x := vol_stable()
};
"""
)
self.assertEqual(
len(set(res.x for res in result)), 1,
'more than one value for the same vol_stable() call'
)
async def test_edgeql_volatility_function_03a(self):
result = await self.con.query(
r"""
SELECT Obj {
# volatile function should be called once for each
# Obj, generating different values
x := vol_volatile()
};
"""
)
self.assertNotEqual(
len(set(res.x for res in result)), 1,
'only one value for multiple vol_volatile() calls'
)
async def test_edgeql_volatility_function_03b(self):
result = await self.con.query(
r"""
SELECT Obj {
# volatile function should be called once for each
# Obj, generating different values
x := (vol_volatile(),)
};
"""
)
self.assertNotEqual(
len(set(res.x for res in result)), 1,
'only one value for multiple vol_volatile() calls'
)
async def test_edgeql_volatility_function_04(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable function call can be
# factored out and called once per query
x := 1 IF Obj.n > 0 ELSE err_immutable()
};
''')
async def test_edgeql_volatility_function_05(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# stable function call cannot be factored out
x := 1 IF Obj.n > 0 ELSE err_stable()
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_function_06(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# volatile function call cannot be factored out
x := 1 IF Obj.n > 0 ELSE err_volatile()
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_operator_01(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable operation can be factored out
# and called once per query
x := 1 IF Obj.n > 0 ELSE (1/0)
};
''')
async def test_edgeql_volatility_cast_01(self):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute(r'''
SELECT Obj {
# this condition is true for all of the Objs, but
# a constant immutable cast can be factored out
# and called once per query
x := 1 IF Obj.n > 0 ELSE (<int64>(<float64>1)/0)
};
''')
async def test_edgeql_volatility_cast_02(self):
await self.assert_query_result(r'''
SELECT Obj {
# this condition is true for all of the Objs and the
# stable cast (<json>) cannot be factored out
x := 1 IF Obj.n > 0 ELSE (<int64>(<json>1)/0)
};
''', [
{'x': 1},
{'x': 1},
{'x': 1},
])
async def test_edgeql_volatility_for_01(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {1,2} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_02(self):
await self.assert_query_result(
r'''
WITH X := (FOR x in {1,2} UNION (uuid_generate_v1mc(), x))
SELECT count(DISTINCT X.0);
''',
[2],
)
async def test_edgeql_volatility_for_03(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
FOR x in {1,2} UNION (uuid_generate_v1mc(), x)))
SELECT count(DISTINCT X.0);
''',
[4],
)
async def test_edgeql_volatility_for_04(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
(0,
(FOR x in {1,2} UNION (
uuid_generate_v1mc(), x)))))
SELECT count(DISTINCT X.1.0);
''',
[4],
)
async def test_edgeql_volatility_for_05(self):
await self.assert_query_result(
r'''
WITH X := (FOR y in {1, 2} UNION (
(uuid_generate_v1mc(),
(INSERT Obj { n := y }))))
SELECT count(DISTINCT X.0);
''',
[2],
)
async def test_edgeql_volatility_for_06(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {1,1} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_07(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {(),()} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_08(self):
await self.assert_query_result(
r'''
SELECT count(DISTINCT (FOR x in {({1,2}, 0).1} UNION (
uuid_generate_v1mc())));
''',
[2],
)
async def test_edgeql_volatility_for_09(self):
await self.assert_query_result(
r'''
SELECT count(
DISTINCT (FOR x in {(Obj { x := random() }).x} UNION (
uuid_generate_v1mc())));
''',
[3],
)
async def test_edgeql_volatility_for_10(self):
res = await self.con.query(
r'''
WITH x := random() FOR y in {1,2,3} UNION (x);
''',
)
self.assertEqual(len(set(res)), 1)
async def test_edgeql_volatility_select_clause_01a(self):
# Spurious failure probability: 1/100!
        # We need a nested SELECT because of bug #1816,
        # which otherwise loses the ORDER BY
await self.assert_query_result(
r'''
WITH X := enumerate((SELECT _gen_series(0,99)
ORDER BY random()))
SELECT all(X.0 = X.1);
''',
[False],
)
async def test_edgeql_volatility_select_clause_01(self):
# Spurious failure probability: 1/100!
        # We need a nested SELECT because of bug #1816,
        # which otherwise loses the ORDER BY
await self.assert_query_result(
r'''
WITH X := enumerate((SELECT _gen_series(0,99)
ORDER BY random()))
SELECT all((FOR x in {X} UNION (x.0 = x.1)))
''',
[False],
)
async def test_edgeql_volatility_select_clause_02(self):
# Spurious failure probability: 1/2^99
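        # (each of the 100 generated rows passes the random() > 0.5 filter
        # independently with probability 1/2, so the count lands on 0 or 100
        # only when all rows agree: 2 * (1/2)**100 = 1/2**99)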
await self.assert_query_result(
r'''
SELECT count((SELECT _gen_series(0,99) FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_select_clause_03(self):
# Spurious failure probability: 1/2^100 I think
# We want to test that the two SELECTs do separate FILTERs
# This is written in an awful way because of a bug with WITH.
await self.assert_query_result(
r'''
FOR X in {
array_agg(
(FOR x in {0, 1} UNION (SELECT _gen_series(0,100)
FILTER random() > 0.5)))}
UNION (
SELECT count(array_unpack(X))
!= 2*count(DISTINCT array_unpack(X)));
''',
[True],
)
async def test_edgeql_volatility_select_clause_04(self):
# Spurious failure probability: 1/2^100 I think
# This is just the test above but manually...
result = await self.con.query(
r'''
FOR x in {0, 1} UNION (
SELECT _gen_series(0,100) FILTER random() > 0.5
)
''',
)
self.assertNotEqual(
2 * len(set(result)), len(result),
'SELECT in FOR loop not doing independent filters'
)
async def test_edgeql_volatility_select_clause_05(self):
# Spurious failure probability: 1/2^99
await self.assert_query_result(
r'''
WITH X := (FOR x in {_gen_series(0,99)} UNION (()))
SELECT count((SELECT X FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_select_clause_06(self):
# Spurious failure probability: 1/2^99
await self.assert_query_result(
r'''
WITH X := (_gen_series(0,99), 0).1
SELECT count((SELECT X FILTER random() > 0.5))
NOT IN {0, 100};
''',
[True],
)
async def test_edgeql_volatility_with_01(self):
await self.assert_query_result(
r'''
WITH X := random() SELECT sum(X) = sum(X);
''',
[True],
)
async def test_edgeql_volatility_with_02(self):
await self.assert_query_result(
r'''
WITH X := random(), Y := X SELECT sum(Y) = sum(Y)
''',
[True],
)
async def test_edgeql_volatility_with_03(self):
await self.assert_query_result(
r'''
WITH W := random(),
Z := W,
SELECT W = Z;
''',
[True],
)
async def test_edgeql_volatility_with_04(self):
await self.assert_query_result(
r'''
WITH W := {random(), random()},
Z := W+0,
SELECT _ := (W = Z) ORDER BY _;
''',
[False, False, True, True],
)
async def test_edgeql_volatility_with_05(self):
await self.con.execute(r'''
CREATE TYPE Foo { CREATE PROPERTY asdf -> tuple<float64> };
''')
await self.con.query(r'''
WITH X := (random(),) SELECT X.0;
''')
await self.con.query(r'''
WITH X := {(random(),),(random(),)} SELECT X.0;
''')
async def test_edgeql_volatility_update_clause_01(self):
# Spurious failure probability: 1/2^99
await self.con.execute(r'''
FOR x in {_gen_series(4,100)} UNION (
INSERT Obj { n := x })
''')
await self.assert_query_result(
r'''
SELECT count(Obj)
''',
[100],
)
await self.assert_query_result(
r'''
WITH X := (UPDATE Obj FILTER random() > 0.5
SET { n := -1 })
SELECT count(X) NOT IN {0, 100}
''',
[True],
)
await self.assert_query_result(
r'''
WITH X := (SELECT Obj FILTER .n < 0)
SELECT count(X) != 0 AND count(X) != 100
''',
[True],
)
async def test_edgeql_volatility_delete_clause_01(self):
# Spurious failure probability: 1/2^99
await self.con.execute(r'''
FOR x in {_gen_series(4,100)} UNION (
INSERT Obj { n := x })
''')
await self.assert_query_result(
r'''
WITH X := (DELETE Obj FILTER random() > 0.5)
SELECT count(X) NOT IN {0, 100}
''',
[True],
)
await self.assert_query_result(
r'''
SELECT count(Obj) != 0 AND count(Obj) != 100
''',
[True],
)
async def test_edgeql_volatility_select_with_objects_01(self):
for query in self.test_loop(10):
res = await query("""
WITH W := (SELECT Obj FILTER random() > 0.5),
SELECT ((SELECT W {n}), (SELECT W {n}))
""")
self._check_crossproduct(
[(row[0]['n'], row[1]['n']) for row in res])
async def test_edgeql_volatility_select_with_objects_02(self):
for query in self.test_loop(10):
res = await query("""
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m;
""")
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_03(self):
for query in self.test_loop(10):
res = await query("""
SELECT {
o := (
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m
)
};
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_04(self):
for query in self.test_loop(10):
res = await query("""
SELECT {
o := (SELECT (
SELECT Obj {n, m := random()}
FILTER .m > 0.3 ORDER BY .m
))
}
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_05(self):
for query in self.test_loop(10):
res = await query("""
SELECT {
o := (SELECT (
SELECT Obj {n, m := random()}
FILTER .m > 0.3
) ORDER BY .m)
}
""")
res = res[0]['o']
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_06(self):
for query in self.test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := random()}
) FILTER .m > 0.3 ORDER BY .m
""")
for row in res:
self.assertGreater(row['m'], 0.3)
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_07(self):
for query in self.test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := {random(), random()}}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums, key=max))
async def test_edgeql_volatility_select_with_objects_08(self):
for query in self.test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := (random(), random())}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_with_objects_09(self):
for query in self.test_loop(10):
res = await query("""
SELECT (
SELECT Obj {n, m := [random(), random()]}
) ORDER BY max(.m)
""")
nums = [row['m'] for row in res]
self.assertEqual(nums, sorted(nums))
async def test_edgeql_volatility_select_objects_optional_01(self):
for _ in range(10):
await self.assert_query_result(
r'''
WITH X := (SELECT Obj {
m := (SELECT .n FILTER random() > 0.5) }),
SELECT count(X);
''',
[3],
)
async def test_edgeql_volatility_select_objects_optional_02(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH X := (SELECT Obj {
m := (SELECT .n FILTER random() > 0.5) }),
SELECT {
foo := (SELECT X {n, m}),
baz := (SELECT X.m),
};
""")
foos = [x['m'] for x in res['foo'] if x['m'] is not None]
self.assertEqual(set(foos), set(res['baz']))
async def test_edgeql_volatility_select_hard_objects_01a(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT (O.m, O.m);
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0], row[1])
# Make sure it is really volatile
self.assertNotEqual(res[0][0], res[1][0])
async def test_edgeql_volatility_select_hard_objects_01b(self):
for query in self.test_loop():
# one side in a subquery, one not
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O.m), O.m);
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0], row[1])
async def test_edgeql_volatility_select_hard_objects_02a(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O.m), (SELECT O.m));
""")
self.assertEqual(len(res), 9)
self._check_crossproduct(res)
async def test_edgeql_volatility_select_hard_objects_02b(self):
for query in self.test_loop(10):
res = await query("""
WITH O := (SELECT Obj {m := random()} FILTER .m > 0.3),
SELECT ((SELECT O.m), (SELECT O.m));
""")
for row in res:
self.assertGreater(row[0], 0.3)
self._check_crossproduct(res)
async def test_edgeql_volatility_select_hard_objects_03(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT (O {m}, O {m});
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_04a(self):
# TODO: this, but wrapped in DISTINCT
# (which breaks the serialization, ugh!)
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
self._check_crossproduct(
[(row[0]['m'], row[1]['m']) for row in res])
async def test_edgeql_volatility_select_hard_objects_04b(self):
# TODO: this, but wrapped in DISTINCT
# (which breaks the serialization, ugh!)
for query in self.test_loop(10):
res = await query("""
WITH O := (SELECT Obj {m := random()} FILTER .m > 0.3),
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
for row in res:
self.assertGreater(row[0]['m'], 0.3)
self._check_crossproduct(
[(row[0]['m'], row[1]['m']) for row in res])
async def test_edgeql_volatility_select_hard_objects_05(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT {m := next()} LIMIT 1),
SELECT (O {m}, O {m});
""")
self.assertEqual(len(res), 1)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_06(self):
# now let's try it with a multi prop
res = await self.con.query("""
WITH O := (SELECT Obj {m := {next(), next()} })
SELECT ((SELECT O {m}), (SELECT O {m}));
""")
self._check_crossproduct([(row[0].m, row[1].m) for row in res])
async def test_edgeql_volatility_select_hard_objects_07(self):
# now let's try it with a multi prop
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {m := {next(), next()} })
SELECT ((O {m}), (O {m}));
""")
self.assertEqual(len(res), 3)
for row in res:
self.assertEqual(row[0]['m'], row[1]['m'])
async def test_edgeql_volatility_select_hard_objects_08a(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {m := next()}),
SELECT {
foo := (SELECT O {n, m}),
bar := (SELECT O {n, m}),
};
""")
self.assertEqual(
{(x['n'], x['m']) for x in res['foo']},
{(x['n'], x['m']) for x in res['bar']},
)
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_hard_objects_08b(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {m := next()} LIMIT 1),
SELECT {
foo := (SELECT O {n, m}),
bar := (SELECT O {n, m}),
};
""")
self.assertEqual(res['foo']['n'], res['bar']['n'])
self.assertEqual(res['foo']['m'], res['bar']['m'])
async def test_edgeql_volatility_select_hard_objects_09(self):
await self.assert_query_result(r'''
WITH O := (SELECT Obj {m := next()}),
SELECT {
foo := (SELECT O),
bar := (SELECT O),
};
''', [
{
'foo': [{"id": {}}, {"id": {}}, {"id": {}}],
'bar': [{"id": {}}, {"id": {}}, {"id": {}}],
}
])
async def test_edgeql_volatility_select_nested_01a(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT Tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends: {n}} ORDER BY .m),
b := (SELECT O {m, friends: {n}} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['n'] for x in ra['friends']},
{x['n'] for x in rb['friends']},
)
    async def test_edgeql_volatility_select_nested_01b(self):
        # same as 01a but without a shape on friends
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT Tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends} ORDER BY .m),
b := (SELECT O {m, friends} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['id'] for x in ra['friends']},
{x['id'] for x in rb['friends']},
)
self.assertLessEqual(len(ra['friends']), 4)
async def test_edgeql_volatility_select_nested_02(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT .tgt FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, friends: {n}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends.n} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['n'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_03a(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT .tgt { x := random() })
}),
SELECT {
a := (SELECT O {m, friends: {x}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends.x} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['x'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_03b(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := next(),
friends := (SELECT (SELECT .tgt) { @x := next() })
}),
SELECT {
a := (SELECT O {m, friends: {@x}} ORDER BY .m),
b := (SELECT O {m, friend_nums := .friends@x} ORDER BY .m),
};
""")
nums = [row['m'] for row in res['a']]
self.assertEqual(nums, sorted(nums))
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(ra['m'], rb['m'])
self.assertEqual(
{x['@x'] for x in ra['friends']},
set(rb['friend_nums']),
)
async def test_edgeql_volatility_select_nested_04a(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() } )
}),
SELECT {
a := (SELECT O {friends: {n, x}}),
b := (SELECT O {friends: {n, x}}),
};
""")
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(len(ra['friends']), 4)
self.assertEqual(
sorted((x['n'], x['x']) for x in ra['friends']),
sorted((x['n'], x['x']) for x in rb['friends']),
)
async def test_edgeql_volatility_select_nested_04b(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {
tgt: { x := next() }
}),
SELECT {
a := (SELECT O {tgt: {n, x}}),
b := (SELECT O {tgt: {n, x}}),
};
""")
self.assertEqual(len(res['a']), 3)
for ra, rb in zip(res['a'], res['b']):
self.assertEqual(len(ra['tgt']), 2)
self.assertEqual(
sorted((x['n'], x['x']) for x in ra['tgt']),
sorted((x['n'], x['x']) for x in rb['tgt']),
)
async def test_edgeql_volatility_select_nested_05(self):
for query in self.test_loop(10, one=True):
res = await query("""
WITH O := (SELECT Obj {
m := rand_int(100),
friends := (SELECT Tgt { x := next() }
FILTER random() > 0.4)
}),
SELECT {
a := (SELECT O {m, n, friends: {n, x}, ha := .friends.x}),
b := (SELECT O {
m,
friends_tuples := (.friends.n, .friends.x),
friend_sums := sum(.friends.x),
}),
c := (O.n, O.friends {n, x}, O.friends {n, x}),
};
""")
cs = {x['n']: [] for x in res['a']}
for rc in res['c']:
self.assertEqual(rc[1]['n'], rc[2]['n'])
self.assertEqual(rc[1]['x'], rc[2]['x'])
cs[rc[0]].append([rc[1]['n'], rc[1]['x']])
for ra, rb in zip(res['a'], res['b']):
self.assertLessEqual(len(ra['friends']), 4)
self.assertEqual(
sorted(x['x'] for x in ra['friends']),
sorted(ra['ha']),
)
self.assertEqual(
sorted([x['n'], x['x']] for x in ra['friends']),
sorted(rb['friends_tuples']),
)
self.assertEqual(
sorted(cs[ra['n']]),
sorted(rb['friends_tuples']),
)
self.assertEqual(sum(ra['ha']), rb['friend_sums'])
async def test_edgeql_volatility_select_nested_06a(self):
# here we want some deduplicating to happen
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT {
x := (O { friends: {x} }),
y := O.friends.x,
};
""")
self.assertEqual(len(res['y']), 4)
all_xs = {t['x'] for r in res['x'] for t in r['friends']}
self.assertTrue(set(res['y']).issubset(all_xs))
async def test_edgeql_volatility_select_nested_06b(self):
# here we want some deduplicating to happen
for query in self.test_loop(one=True):
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT {
x := (O { friends: {n, x} }),
y := O.friends {n, x},
};
""")
self.assertEqual(len(res['y']), 4)
all_xs = {(t['n'], t['x']) for r in res['x'] for t in r['friends']}
y = {(t['n'], t['x']) for t in res['y']}
self.assertTrue(y.issubset(all_xs))
async def test_edgeql_volatility_select_nested_06c(self):
# here we want some deduplicating to happen
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT ((SELECT O.friends.x), (SELECT O.friends.x));
""")
self.assertEqual(len(res), 16)
async def test_edgeql_volatility_select_nested_06d(self):
# here we want some deduplicating to happen
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj {
friends := (SELECT Tgt { x := next() })
}),
SELECT O.friends;
""")
self.assertEqual(len(res), 4)
res = await query("""
WITH O := (SELECT (SELECT Obj {
friends := (SELECT Tgt { x := next() })
})),
SELECT O.friends;
""")
self.assertEqual(len(res), 4)
async def test_edgeql_volatility_select_nested_07a(self):
for query in self.test_loop(10):
res = await query("""
SELECT Obj {
n,
tgt: {
n,
} FILTER random() < 0.5
}
FILTER EXISTS (.tgt);
""")
for row in res:
self.assertGreater(len(row['tgt']), 0)
async def test_edgeql_volatility_select_nested_07b(self):
for query in self.test_loop(10):
res = await query("""
SELECT Obj {
n,
tgts := (SELECT .tgt {
n,
} FILTER random() < 0.5)
}
FILTER EXISTS (.tgts);
""")
for row in res:
self.assertGreater(len(row['tgts']), 0)
@test.xfail("Arrays containing objects are hard; TODO: fail?")
async def test_edgeql_volatility_select_arrays_01(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := [(SELECT Obj {m := next()})],
SELECT {
foo := (SELECT O[0] {m}),
bar := (SELECT O[0] {m}),
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_tuples_01(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := ((SELECT Obj {m := next()}),),
SELECT {
foo := (SELECT O.0 {n, m}),
bar := (SELECT O.0 {n, m}),
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
async def test_edgeql_volatility_select_tuples_02(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (z := ((SELECT Obj {m := next()}),)),
SELECT {
foo := (SELECT O.z.0 {n, m}),
bar := (SELECT O.z.0 {n, m}),
os := O,
ms := O.z.0.m,
};
""")
self.assertEqual(res['foo'], res['bar'])
self.assertEqual(len(res['foo']), 3)
self.assertEqual(
{x['m'] for x in res['foo']},
set(res['ms']),
)
async def test_edgeql_volatility_select_tuples_03(self):
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := ((SELECT Obj { m := next() }),),
SELECT count((SELECT (X, Y) FILTER X = Y));
''', [
3,
])
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := ((SELECT Obj { m := next() }),),
SELECT count((SELECT (X, Y) FILTER X < Y));
''', [
3,
])
await self.assert_query_result(r'''
WITH X := ((SELECT Obj { m := next() }),),
Y := (Obj,),
SELECT count((SELECT (X, Y) FILTER X < Y));
''', [
3,
])
async def test_edgeql_volatility_insert_01(self):
for query in self.test_loop(one=True):
res = await query("""
WITH
Foo := (SELECT (
INSERT Obj {n := 10}
) { m := next() })
SELECT {
foo := Foo {n, m},
bar := Foo {n, m},
};
""")
self.assertEqual(res['foo']['n'], 10)
self.assertEqual(res['foo']['m'], res['bar']['m'])
async def test_edgeql_volatility_nested_link_01(self):
# next() should get called once for each Obj/Tgt pair
for query in self.test_loop():
res = await query(
r"""
SELECT Obj {
l := (SELECT Tgt { m := next() }),
};
"""
)
nums = [t['m'] for o in res for t in o['l']]
self.assertEqual(len(nums), len(set(nums)))
async def test_edgeql_volatility_hack_01(self):
await self.assert_query_result(r'''
SELECT (FOR x IN {1,2} UNION (SELECT Obj { m := vol_id(x) }))
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_02(self):
await self.assert_query_result(r'''
WITH X := (FOR x IN {1,2} UNION (SELECT Obj { m := vol_id(x) }))
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_03a(self):
await self.assert_query_result(r'''
WITH X := (WITH x := {1,2}, SELECT (x, Obj {m := vol_id(x)})).1
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_03b(self):
await self.assert_query_result(r'''
WITH X := (WITH x := {1,2}, SELECT (x, Obj {m := vol_id(x)}).1)
SELECT X { n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_04a(self):
await self.assert_query_result(r'''
SELECT (WITH x := {1,2}, SELECT (x, Obj {m := vol_id(x)})).1
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
async def test_edgeql_volatility_hack_04b(self):
await self.assert_query_result(r'''
SELECT (WITH x := {1,2}, SELECT (x, Obj {m := vol_id(x)}).1)
{ n, m } ORDER BY .m THEN .n;
''', [
{"m": 1, "n": 1},
{"m": 1, "n": 2},
{"m": 1, "n": 3},
{"m": 2, "n": 1},
{"m": 2, "n": 2},
{"m": 2, "n": 3},
])
@test.xfail("We generate SQL with a missing FROM-clause entry")
async def test_edgeql_volatility_for_like_hard_01(self):
for query in self.test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Z := (O, (SELECT O { n, x, y := -.x })).1
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['x'], res[1]['x'])
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
@test.xfail("We produce too many rows")
async def test_edgeql_volatility_for_like_hard_02(self):
for query in self.test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Z := (O, ({ o := O })).1
SELECT Z { o: {n, x} };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['o']['x'], res[1]['o']['x'])
@test.xfail("Fails finding a range var")
async def test_edgeql_volatility_for_like_hard_03(self):
for query in self.test_loop():
res = await query("""
WITH
O := (SELECT Obj { x := next() }),
Za := (O, ({ o := O })),
Z := Za.1
SELECT Z { o: {n, x} };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['o']['x'], res[1]['o']['x'])
@test.xfail("We produce too many rows")
async def test_edgeql_volatility_for_hard_01(self):
# XXX: Z not getting materialized (but that's not all that's wrong?)
for query in self.test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj { x := next() }
)} UNION (
SELECT O { y := -.x }
)),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['x'], res[1]['x'])
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
@test.xfail("We produce too many rows")
async def test_edgeql_volatility_for_hard_02(self):
for query in self.test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj { x := next() }
)} UNION (
SELECT { a := O { n, x, y := -.x } }
)),
SELECT Z { a: { n, x, y }};
""")
self.assertEqual(len(res), 3)
self.assertNotEqual(res[0]['a']['x'], res[1]['a']['x'])
for obj in res:
self.assertEqual(obj['a']['x'], -obj['a']['y'])
@test.xfail("We produce too many rows")
async def test_edgeql_volatility_for_hard_03(self):
for query in self.test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj {
tgt: { x := next() }
}
)} UNION (
SELECT O {tgt: {n, x, y := -.x}}
)),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])
@test.xfail("We generate SQL with a missing FROM-clause entry")
async def test_edgeql_volatility_for_hard_04(self):
for query in self.test_loop():
res = await query("""
WITH Z := (FOR O IN {(
SELECT Obj {
tgt: { x := next() }
}
)} UNION (
SELECT { a := (O {tgt: {n, x, y := -.x}}) }
)),
SELECT Z { a: {tgt: {n, x, y} } };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['a']['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])
async def test_edgeql_volatility_rebind_flat_01(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {y := -.x}),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
async def test_edgeql_volatility_rebind_flat_02(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {x, y := -.x}),
SELECT Z { n, x, y };
""")
self.assertEqual(len(res), 3)
for obj in res:
self.assertEqual(obj['x'], -obj['y'])
async def test_edgeql_volatility_rebind_flat_03(self):
for query in self.test_loop():
res = await query("""
WITH O := (SELECT Obj { x := next() }),
Z := (SELECT O {x := .x}),
SELECT (Z.n, (SELECT Z.x), (SELECT Z.x));
""")
self.assertEqual(len(res), 3)
for _, x1, x2 in res:
self.assertEqual(x1, x2)
async def test_edgeql_volatility_rebind_nested_01(self):
for query in self.test_loop():
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := (SELECT O {tgt: {n, x, y := -.x}}),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])
async def test_edgeql_volatility_rebind_nested_02(self):
for query in self.test_loop():
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := (SELECT O {tgt: {n, y := -.x}}),
SELECT Z { tgt: {n, x, y} };
""")
self.assertEqual(len(res), 3)
for obj in res:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])
async def test_edgeql_volatility_rebind_nested_03(self):
for query in self.test_loop(one=True):
res = await query("""
WITH O := (
SELECT Obj {
tgt: { x := next() }
}
),
Z := { o := (SELECT O {tgt: {n, y := -.x}}) },
SELECT Z { o: {tgt: {n, x, y}} };
""")
for obj in res['o']:
for tgt in obj['tgt']:
self.assertEqual(tgt['x'], -tgt['y'])
async def test_edgeql_volatility_errors_01(self):
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT Obj.n + random()
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT (Obj.n, random())
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation"):
await self.con.execute(
r"""
SELECT ({1,2}, random())
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=28):
await self.con.execute(
r"""
SELECT random() + Obj.n
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT {1,2} + (FOR x in {1,2,3} UNION (x*random()))
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=36):
await self.con.execute(
r"""
SELECT ({1,2}, (INSERT Obj { n := 100 }))
"""
)
async with self._run_and_rollback():
with self.assertRaisesRegex(
edgedb.QueryError,
"can not take cross product of volatile operation",
_position=64):
await self.con.execute(
r"""
SELECT ({1,2},
(FOR i in {1,2,3} UNION (
INSERT Obj { n := i })))
"""
)
| 34.512179
| 79
| 0.435632
|
6dfaba6bfb090f2c7902a1f4aa892617d5a3b4a4
| 876
|
py
|
Python
|
src/main.py
|
kschwarz1116/Python-Cohort-Analysis
|
bbcfbfa18b179c0d7edfcc7bc052d628396122c0
|
[
"MIT"
] | null | null | null |
src/main.py
|
kschwarz1116/Python-Cohort-Analysis
|
bbcfbfa18b179c0d7edfcc7bc052d628396122c0
|
[
"MIT"
] | null | null | null |
src/main.py
|
kschwarz1116/Python-Cohort-Analysis
|
bbcfbfa18b179c0d7edfcc7bc052d628396122c0
|
[
"MIT"
] | null | null | null |
"""
This script holds the main functionality for the Cohort Analysis programming challenge.
"""
import sys
from customer import CUSTOMERS
import read_customer
import read_orders
import write_analysis as write
def main() -> None:
"""This function accepts command line arguments and parses them into a table"""
num_args: int = len(sys.argv)
if (num_args < 4) or (num_args > 5):
print("Got improper args")
return
customer_file: str = sys.argv[1]
order_file: str = sys.argv[2]
out_file: str = sys.argv[3]
if num_args == 5:
offset_hours: str = sys.argv[4]
else:
offset_hours = "0"
customers: CUSTOMERS = read_customer.read_customers(customer_file)
read_orders.read_orders(order_file, customers)
write.print_cohort_analysis(write.generate_cohort_analysis(customers, offset_hours), out_file)
main()
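# Example invocation (illustrative; the expected file formats are whatever the
# read_customer/read_orders modules accept, and the trailing offset_hours
# argument is optional):
#   python src/main.py customers.csv orders.csv analysis.csv -5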
| 25.764706
| 98
| 0.705479
|
5722844dd37b6e1ca4bbf2030b58e1bf36e79a8e
| 2,329
|
py
|
Python
|
docassemble_demo/setup.py
|
abramsmatthew/adpllc-test
|
b6ae9d07ed3e98dd9f1fae5f4af8dc5b104ca8e6
|
[
"MIT"
] | null | null | null |
docassemble_demo/setup.py
|
abramsmatthew/adpllc-test
|
b6ae9d07ed3e98dd9f1fae5f4af8dc5b104ca8e6
|
[
"MIT"
] | null | null | null |
docassemble_demo/setup.py
|
abramsmatthew/adpllc-test
|
b6ae9d07ed3e98dd9f1fae5f4af8dc5b104ca8e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
out = {}
stack = [(convert_path(where), '', package)]
while stack:
where, prefix, package = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package))
else:
stack.append((fn, prefix + name + '/', package))
else:
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
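# Illustrative example (assumed layout, not part of the original file): for a
# data file at docassemble/demo/data/questions/example.yml, the
# find_package_data(where='docassemble/demo/', package='docassemble.demo') call
# in setup() below would yield {'docassemble.demo': ['data/questions/example.yml', ...]},
# while *.py files and the excluded directories are skipped.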
setup(name='docassemble.demo',
version='0.1',
description=('A demonstration package for docassemble.'),
author='Jonathan Pyle',
author_email='jhpyle@gmail.com',
license='MIT',
url='http://docassemble.org',
packages=find_packages(),
namespace_packages = ['docassemble'],
install_requires = ['docassemble', 'docassemble.base', 'python-dateutil'],
zip_safe = False,
package_data=find_package_data(where='docassemble/demo/', package='docassemble.demo'),
)
| 38.816667
| 121
| 0.526406
|
34ba21b1c5319ccadf544cc6fab9d2408379a3d7
| 2,495
|
py
|
Python
|
memcache_status/templatetags/memcache_status_tags.py
|
davidsomnus/django-memcache-status
|
cca021828578c10421ccfe60c4d3ae9eba7a55cc
|
[
"MIT"
] | 55
|
2015-02-04T16:24:42.000Z
|
2022-01-11T07:03:55.000Z
|
memcache_status/templatetags/memcache_status_tags.py
|
KonstantinKlepikov/django-memcache-status
|
3c6e59871a03d3c79f91a6f006c2dde367a060cc
|
[
"MIT"
] | 14
|
2015-01-02T23:37:43.000Z
|
2021-07-19T17:47:26.000Z
|
memcache_status/templatetags/memcache_status_tags.py
|
KonstantinKlepikov/django-memcache-status
|
3c6e59871a03d3c79f91a6f006c2dde367a060cc
|
[
"MIT"
] | 14
|
2015-03-19T13:40:01.000Z
|
2020-08-26T13:13:49.000Z
|
from __future__ import unicode_literals
import logging
from datetime import datetime
from django import template
from django.apps import apps
from memcache_status.utils import get_cache_stats
logger = logging.getLogger(__name__)
register = template.Library()
@register.simple_tag(takes_context=True)
def memcache_status(context):
request = context.request
config = apps.get_app_config('memcache_status')
if not config.show_cache_stats(request):
logger.debug('Cache stats not shown because user has no permission.')
return []
return get_cache_stats()
class PrettyValue(object):
"""
    Helper class that reformats a stat value. Looks for a method named
    ``format_<key>_value`` and returns its result. Returns the value
    as is if no such format method is found.
"""
def format(self, key, value):
try:
func = getattr(self, 'format_%s_value' % key.lower())
return func(value)
except AttributeError:
return value
def format_limit_maxbytes_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_bytes_read_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_bytes_written_value(self, value):
return "%s (%s)" % (value, self.human_bytes(value))
def format_uptime_value(self, value):
return self.fract_timestamp(int(value))
def format_time_value(self, value):
return datetime.fromtimestamp(int(value)).strftime('%x %X')
def fract_timestamp(self, s):
years, s = divmod(s, 31556952)
min_, s = divmod(s, 60)
h, min_ = divmod(min_, 60)
d, h = divmod(h, 24)
return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, min_, s)
def human_bytes(self, bytes_):
bytes_ = float(bytes_)
if bytes_ >= 1073741824:
gigabytes_ = bytes_ / 1073741824
size = '%.2fGB' % gigabytes_
elif bytes_ >= 1048576:
megabytes_ = bytes_ / 1048576
size = '%.2fMB' % megabytes_
elif bytes_ >= 1024:
kilobytes_ = bytes_ / 1024
size = '%.2fKB' % kilobytes_
else:
size = '%.2fB' % bytes_
return size
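# Illustrative doctest-style examples (not part of the original module):
# >>> PrettyValue().format('bytes_read', 1048576)
# '1048576 (1.00MB)'
# >>> PrettyValue().format('uptime', 93784)
# '0y, 1d, 2h, 3m, 4s'
# >>> PrettyValue().format('curr_items', 42)   # no formatter -> returned as is
# 42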
@register.filter
def memcache_status_pretty_name(name):
return ' '.join([word.capitalize() for word in name.split('_')])
@register.filter
def memcache_status_pretty_value(value, key):
return PrettyValue().format(key, value)
| 28.678161
| 77
| 0.638477
|
820898cc0db181c1abf7263440ca33731fd7881d
| 14,361
|
py
|
Python
|
fairscale/nn/model_parallel/layers.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1,662
|
2020-07-15T21:40:19.000Z
|
2022-03-31T10:45:12.000Z
|
fairscale/nn/model_parallel/layers.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 648
|
2020-07-21T19:00:32.000Z
|
2022-03-30T23:11:41.000Z
|
fairscale/nn/model_parallel/layers.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 170
|
2020-07-16T00:28:01.000Z
|
2022-03-15T19:39:21.000Z
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
from typing import Callable, Optional
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from .initialize import get_model_parallel_rank, get_model_parallel_world_size
from .mappings import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
reduce_from_model_parallel_region,
scatter_to_model_parallel_region,
)
from .utils import VocabUtility, divide_and_check_no_remainder
def _initialize_affine_weight(
weight: torch.Tensor,
out_features: int,
in_features: int,
per_partition_size: int,
partition_dim: int,
init_method: Callable[[torch.Tensor], torch.Tensor],
stride: int = 1,
return_master_weight: bool = False,
) -> Optional[torch.Tensor]:
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
# If we only use 1 process for model parallelism, bypass scatter.
world_size = get_model_parallel_world_size()
if world_size == 1:
init_method(weight)
if return_master_weight:
return weight
return None
# Initialize master weight
master_weight = torch.empty(out_features, in_features, dtype=weight.dtype, requires_grad=False)
init_method(master_weight)
# Split and copy
per_partition_per_stride_size = divide_and_check_no_remainder(per_partition_size, stride)
weight_list = torch.split(master_weight, per_partition_per_stride_size, dim=partition_dim)
rank = get_model_parallel_rank()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
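# Partitioning sketch (derived from the code above): with world_size=p and
# stride=1 the master weight is split along `partition_dim` into p chunks and
# rank r keeps chunk r; with stride>1 the finer per-stride chunks are assigned
# round-robin (rank r takes chunks r, r+p, r+2p, ...) before being concatenated
# back into the local `weight`.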
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
) -> None:
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._weight = None
        # Divide the weight matrix along the vocabulary dimension.
self.vocab_start_index, self.vocab_end_index = VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_model_parallel_rank(), get_model_parallel_world_size()
)
self.num_embeddings_per_partition = self.vocab_end_index - self.vocab_start_index
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings_per_partition, self.embedding_dim))
# And initialize.
_initialize_affine_weight(
self.weight, self.num_embeddings, self.embedding_dim, self.num_embeddings_per_partition, 0, init_method
)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
# Get the embeddings.
output_parallel = F.embedding(
masked_input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Mask the output embedding.
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_model_parallel_region(output_parallel)
return output
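# Note on the masking above: each rank stores only rows
# [vocab_start_index, vocab_end_index) of the table, so token ids are shifted
# into the local range, out-of-range ids are mapped to local row 0, their
# lookups are zeroed out afterwards, and the all-reduce across model-parallel
# ranks reassembles the complete embeddings.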
class ParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the embedding dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
keep_master_weight_for_test: bool = False,
) -> None:
super(ParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
        self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._weight = None
# Divide the weight matrix along the embedding dimension.
world_size = get_model_parallel_world_size()
self.embedding_dim_per_partition = divide_and_check_no_remainder(self.embedding_dim, world_size)
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings, self.embedding_dim_per_partition))
# And initialize.
_initialize_affine_weight(
self.weight,
self.num_embeddings,
self.embedding_dim,
self.embedding_dim_per_partition,
1,
init_method,
stride=1,
return_master_weight=False,
)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
input_parallel = copy_to_model_parallel_region(input_)
output_parallel = F.embedding(
input_parallel,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
output = gather_from_model_parallel_region(output_parallel)
return output
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
in_features: first dimension of matrix A.
out_features: second dimension of matrix A.
bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs; otherwise, every GPU will have its own output,
                       which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
gather_output: bool = True,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
stride: int = 1,
keep_master_weight_for_test: bool = False,
) -> None:
super(ColumnParallelLinear, self).__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.output_size_per_partition = divide_and_check_no_remainder(out_features, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.output_size_per_partition, self.in_features))
if bias:
self.bias = Parameter(torch.Tensor(self.output_size_per_partition))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight,
self.out_features,
self.in_features,
self.output_size_per_partition,
0,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
def get_master_weight(self) -> torch.Tensor:
return gather_from_model_parallel_region(self.weight.data.transpose(0, 1)).transpose_(0, 1)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type: ignore
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight, self.bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output
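# Shape sketch for ColumnParallelLinear above (illustrative): with world_size=p
# each rank stores a weight of shape (out_features // p, in_features), i.e. the
# transpose of its column block A_i; forward consumes the replicated input
# (copy_to_model_parallel_region sets up the backward all-reduce), computes the
# local slice Y_i = X A_i, and with gather_output=True all-gathers the slices so
# every rank ends up with the full (..., out_features) result.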
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
Arguments:
in_features: first dimension of matrix A.
out_features: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
input_is_parallel: bool = False,
init_method: Callable[[torch.Tensor], torch.Tensor] = init.xavier_normal_,
stride: int = 1,
keep_master_weight_for_test: bool = False,
):
super(RowParallelLinear, self).__init__()
# Keep input parameters
self.in_features = in_features
self.out_features = out_features
self.input_is_parallel = input_is_parallel
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.input_size_per_partition = divide_and_check_no_remainder(in_features, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.out_features, self.input_size_per_partition))
if bias:
self.bias = Parameter(torch.Tensor(self.out_features))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight,
self.out_features,
self.in_features,
self.input_size_per_partition,
1,
init_method,
stride=stride,
return_master_weight=keep_master_weight_for_test,
)
def get_master_weight(self) -> torch.Tensor:
return gather_from_model_parallel_region(self.weight.data)
def forward(self, input_: torch.Tensor) -> torch.Tensor: # type:ignore
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if self.bias is not None:
output = output_ + self.bias
else:
output = output_
return output
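# Shape sketch for RowParallelLinear above (illustrative): each rank stores a
# weight of shape (out_features, in_features // p), i.e. the transpose of its
# row block A_i; the input is scattered (or assumed pre-split) along its last
# dimension, the partial products X_i A_i are summed with an all-reduce, and the
# non-parallelized bias is added once after the reduction.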
| 37.012887
| 115
| 0.64898
|
65da5e742665a6ded0a0d8de4795566fbc14749b
| 693
|
py
|
Python
|
backend/aporte/migrations/0012_auto_20200430_0427.py
|
felipemaion/financial-management-gate-django
|
5f93a7d28a55852fed0a16d1830f92b0ee065948
|
[
"MIT"
] | 2
|
2019-04-15T20:36:48.000Z
|
2020-02-09T23:20:27.000Z
|
backend/aporte/migrations/0012_auto_20200430_0427.py
|
felipemaion/financial-management-gate-django
|
5f93a7d28a55852fed0a16d1830f92b0ee065948
|
[
"MIT"
] | 5
|
2020-02-12T00:06:06.000Z
|
2020-06-05T05:09:45.000Z
|
backend/aporte/migrations/0012_auto_20200430_0427.py
|
felipemaion/financial-management-gate-django
|
5f93a7d28a55852fed0a16d1830f92b0ee065948
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-04-30 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aporte', '0011_auto_20200429_2118'),
]
operations = [
migrations.RenameField(
model_name='instrument',
old_name='crpNm',
new_name='crpnNm',
),
migrations.RemoveField(
model_name='instrument',
name='mctyCtgyNm',
),
migrations.AddField(
model_name='instrument',
name='sctyCtgyNm',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='SctyCtgyNm'),
),
]
| 24.75
| 100
| 0.574315
|
bbebf4b639ff48563708a358a378006c6dfdd4d2
| 964
|
py
|
Python
|
pup/sort.py
|
duckinator/pup-tools
|
6f2064450bf27fc5bfbb09aeb01cf66ba4474060
|
[
"MIT"
] | null | null | null |
pup/sort.py
|
duckinator/pup-tools
|
6f2064450bf27fc5bfbb09aeb01cf66ba4474060
|
[
"MIT"
] | null | null | null |
pup/sort.py
|
duckinator/pup-tools
|
6f2064450bf27fc5bfbb09aeb01cf66ba4474060
|
[
"MIT"
] | null | null | null |
import argparse
import sys
def parseargs(args=None):
parser = argparse.ArgumentParser(description='Sort lines fed via stdin.')
parser.add_argument('--key',
help='Function to use for key sorting.')
parser.add_argument('--reverse', '-r', action='store_true',
help='Perform a reversed sort.')
parser.add_argument('--separator', '-s',
help='Use the provided value instead of newlines.')
return parser.parse_args(args)
def main(args=None):
opts = parseargs(args)
key = opts.key
sep = opts.separator
if isinstance(key, str):
key = eval(key)
if sep is None:
sepfn = str.splitlines
else:
sepfn = lambda x: x.split(sep)
vals = sepfn(sys.stdin.read())
ret = sorted(vals, key=key)
if opts.reverse:
ret = reversed(ret)
print(*ret, sep='\n')
return 0
if __name__ == '__main__':
sys.exit(main())
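A short usage sketch (editorial addition, not part of the file): stdin is faked with io.StringIO so it runs stand-alone, and the import path pup.sort is assumed from the repository layout above. Note that a --key value, when supplied, is passed through eval() to obtain a callable (e.g. --key str.lower), so it should only be given trusted input.
import io
import sys
from pup.sort import main   # import path assumed from the repo layout above
sys.stdin = io.StringIO("pear,apple,banana")
main(['--separator', ',', '--reverse'])
# prints:
# pear
# banana
# apple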
| 25.368421
| 77
| 0.589212
|
56158bcce71a8c24f1a042f5b700b65a77c621ab
| 21,087
|
py
|
Python
|
scripts/bin/base_ip.py
|
open-switch/opx-nas-linux
|
073b287c7c998b0dc16bc732fa37bbdddfd69d66
|
[
"CC-BY-4.0"
] | 1
|
2017-12-28T16:57:02.000Z
|
2017-12-28T16:57:02.000Z
|
scripts/bin/base_ip.py
|
open-switch/opx-nas-linux
|
073b287c7c998b0dc16bc732fa37bbdddfd69d66
|
[
"CC-BY-4.0"
] | 10
|
2017-08-07T22:43:34.000Z
|
2021-06-09T13:34:01.000Z
|
scripts/bin/base_ip.py
|
open-switch/opx-nas-linux
|
073b287c7c998b0dc16bc732fa37bbdddfd69d66
|
[
"CC-BY-4.0"
] | 14
|
2017-01-05T19:18:42.000Z
|
2020-03-06T10:01:04.000Z
|
#!/usr/bin/python
# Copyright (c) 2019 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import cps
import subprocess
import sys
import cps_object
import cps_utils
import socket
import binascii
import ifindex_utils
import dn_base_ip_tool
import systemd.daemon
import dn_base_ip_tbl_tool
import dn_base_ipsec_utils
import threading
from dn_base_ip_tool import log_err, log_info
iplink_cmd = '/sbin/ip'
_keys = {
'base-ip/ipv4': cps.key_from_name('target', 'base-ip/ipv4'),
'base-ip/ipv6': cps.key_from_name('target', 'base-ip/ipv6'),
cps.key_from_name('target', 'base-ip/ipv4'): 'base-ip/ipv4',
cps.key_from_name('target', 'base-ip/ipv6'): 'base-ip/ipv6',
}
_ip_unreach_key = cps.key_from_name('target', 'os-icmp-cfg/ip-unreachables-config')
_proxy_arp_key = cps.key_from_name('target', 'base-route/proxy-arp-config')
_ip_af = {
2 : 'ipv4',
10 : 'ipv6',
}
_ip_neigh_flush_key = cps.key_from_name('target', 'base-neighbor/flush')
_ipv6_enable_status = {}
def get_next_index(d):
count = 0
while True:
if str(count) not in d:
return count
count += 1
def _get_af_from_name(name):
type = 'ipv4'
if name.find(type) == -1:
type = 'ipv6'
return type
def _get_obj_name(obj):
return _keys[obj.get_key()]
def _get_af_from_obj(obj):
return _get_af_from_name(_get_obj_name(obj))
def _get_proc_fwd_entry(dev, iptype):
return ['proc', 'sys', 'net', iptype, 'conf', dev, 'forwarding']
def _get_proc_disable_ipv6_entry(dev):
return ['proc', 'sys', 'net', 'ipv6', 'conf', dev, 'disable_ipv6']
def _get_proc_ipv6_autoconf_entry(dev):
return ['proc', 'sys', 'net', 'ipv6', 'conf', dev, 'autoconf']
def _get_proc_ipv6_accept_dad_entry(dev):
return ['proc', 'sys', 'net', 'ipv6', 'conf', dev, 'accept_dad']
def _get_proc_ipv4_arp_accept_entry(dev):
return ['proc', 'sys', 'net', 'ipv4', 'conf', dev, 'arp_accept']
def _get_proc_variable(path):
try:
path = '/'.join(path)
with open('/' + path, 'r') as f:
data = f.read()
return int(data)
except:
print "Error reading ", path
return -1
def _set_proc_variable(path, value):
try:
path = '/'.join(path)
with open('/' + path, 'w') as f:
f.write(str(value))
except:
print "Error writing ", path
return -1
def create_obj_from_line(obj_type, ifix, ifname, vrfname):
af = _get_af_from_name(obj_type)
o = cps_object.CPSObject(obj_type, data={'base-ip/' + af + '/vrf-id': 0,
'base-ip/' + af + '/ifindex': ifix,
'base-ip/' + af + '/name': ifname,
'base-ip/' + af + '/vrf-name': vrfname,
})
return o
def _get_key_from_obj(obj):
af = _get_af_from_obj(obj)
str_index = 'base-ip/' + af + '/ifindex'
str_name = 'base-ip/' + af + '/name'
name = None
try:
index = obj.get_attr_data(str_index)
name = ifindex_utils.if_indextoname(index)
except:
pass
if name is None:
try:
name = obj.get_attr_data(str_name)
except:
pass
return name
def _ip_line_type_valid(af, ip):
if af == 'ipv4' and ip[0] == 'inet':
return True
if af == 'ipv6' and ip[0] == 'inet6':
return True
return False
def process_ip_line(af, d, ip):
search_str = None
_srch = {'ipv4': 'inet', 'ipv6': 'inet6'}
_af = {'ipv4': socket.AF_INET, 'ipv6': socket.AF_INET6}
if af not in _srch:
return
if ip[0] == _srch[af]:
try:
addr = ip[1]
prefix = ip[2]
addr = binascii.hexlify(socket.inet_pton(_af[af], addr))
prefix = int(prefix)
d['base-ip/' + af + '/address/ip'] = cps_object.types.to_data(
'base-ip/' + af + '/address/ip', addr)
d['base-ip/' + af + '/address/prefix-length'] = cps_object.types.to_data(
'base-ip/' + af + '/address/prefix-length', prefix)
except:
print "Unable to convert address ", header
pass
def add_ip_info(af, o, ip):
if af is None:
return
if 'base-ip/' + af + '/address' not in o.get()['data']:
o.get()['data']['base-ip/' + af + '/address'] = {}
_v = o.get()['data']['base-ip/' + af + '/address']
d = {}
next_index = get_next_index(_v)
process_ip_line(af, d, ip)
if (len(d)) > 0:
_v[str(next_index)] = d
def _get_ip_objs(filt, resp):
af = _get_af_from_obj(filt)
name = _get_key_from_obj(filt)
vrf_name = None
try:
vrf_name = filt.get_attr_data('base-ip/' + af + '/vrf-name')
except:
# VRF-name is optional attribute.
pass
if (vrf_name is None) and (name is not None):
vrf_name = 'default'
lst = dn_base_ip_tool.get_if_details(vrf_name, name)
for _if in lst:
o = create_obj_from_line('base-ip/' + af, _if.ifix, _if.ifname, _if.vrf_name)
name = o.get_attr_data('base-ip/' + af + '/name')
if not filt.key_compare(
{'base-ip/' + af + '/name': name,
'base-ip/' + af + '/ifindex': o.get_attr_data('base-ip/' + af + '/ifindex')}):
continue
fwd = _get_proc_variable(
_get_proc_fwd_entry(o.get_attr_data('base-ip/' + af + '/name'), af))
if fwd == -1:
fwd = 0
o.add_attr('base-ip/' + af + '/forwarding', fwd)
if af == 'ipv6':
enabled = _ipv6_enable_status.get(name, None)
log_msg = 'IPv6 intf-name:' + name + ' enabled status in DB:' + str(enabled)
log_info(log_msg)
if enabled is None:
enabled = 1
disable_ipv6 = _get_proc_variable(_get_proc_disable_ipv6_entry(name))
if disable_ipv6 == -1 or disable_ipv6 == 1:
enabled = 0
o.add_attr('base-ip/' + af + '/enabled', enabled)
autoconf = _get_proc_variable(_get_proc_ipv6_autoconf_entry(name))
if autoconf == -1 or autoconf == 0:
autoconf = 0
o.add_attr('base-ip/' + af + '/autoconf', autoconf)
accept_dad = _get_proc_variable(_get_proc_ipv6_accept_dad_entry(name))
if accept_dad != 1 and accept_dad != -1:
o.add_attr('base-ip/' + af + '/accept-dad', accept_dad + 1)
log_msg = 'IPv6 intf-name:' + name + ' fwd status:' + str(fwd) + ' ipv6 status:' \
+ str(enabled) + 'auto conf:' + str(autoconf) + 'accept_dad:' + str(accept_dad)
log_info(log_msg)
else:
log_msg = 'IPv4 intf-name:' + name + ' fwd status:' + str(fwd)
log_info(log_msg)
for _ip in _if.ip:
add_ip_info(af, o, _ip)
resp.append(o.get())
return True
def get_cb(methods, params):
obj = cps_object.CPSObject(obj=params['filter'])
resp = params['list']
if obj.get_key() == _keys['base-ip/ipv4'] or obj.get_key() == _keys['base-ip/ipv6']:
return _get_ip_objs(obj, resp)
return False
def trans_cb(methods, params):
obj = cps_object.CPSObject(obj=params['change'])
af = _get_af_from_obj(obj)
name = _get_key_from_obj(obj)
if name is None:
print "Missing keys for request ", obj
return False
vrf_name = 'default'
try:
vrf_name = obj.get_attr_data('base-ip/' + af + '/vrf-name')
except:
# VRF-name is optional attribute.
pass
addr = ""
try:
if params['operation'] == 'set' and obj.get_key() == _keys['base-ip/' + af]:
if af == 'ipv6':
try:
enabled = obj.get_attr_data('base-ip/' + af + '/enabled')
if enabled == 1:
disable_ipv6 = 0
else:
disable_ipv6 = 1
_ipv6_enable_status[name] = enabled
if vrf_name == 'default':
ret_val = _set_proc_variable(_get_proc_disable_ipv6_entry(name),\
str(disable_ipv6))
else:
ret_val = dn_base_ip_tool.disable_ipv6_config(name, str(disable_ipv6), vrf_name)
log_msg = 'CPS set for VRF:' + vrf_name + 'intf-name:' + name + ' ipv6 status:' +\
str(enabled) + 'ret_val:' + str(ret_val)
log_info(log_msg)
if ret_val == -1:
return False
except:
pass
try:
autoconf = obj.get_attr_data('base-ip/' + af + '/autoconf')
if vrf_name == 'default':
ret_val = _set_proc_variable(_get_proc_ipv6_autoconf_entry(name), str(autoconf))
else:
ret_val = dn_base_ip_tool.ipv6_autoconf_config(name, autoconf, vrf_name)
log_msg = 'CPS set for VRF:' + vrf_name + 'intf-name:' + name + ' ipv6 auto conf status:'\
+ str(autoconf) + 'ret_val:' + str(ret_val)
log_info(log_msg)
if ret_val == -1:
return False
except:
pass
try:
accept_dad = obj.get_attr_data('base-ip/' + af + '/accept-dad')
# Check the valid enum values
if accept_dad not in [1,2,3]:
return False
# CPS enum starts from 1 but kernel enum starts from 0
accept_dad = accept_dad - 1
if vrf_name == 'default':
ret_val = _set_proc_variable(_get_proc_ipv6_accept_dad_entry(name), str(accept_dad))
else:
ret_val = dn_base_ip_tool.ipv6_accept_dad_config(name, str(accept_dad), vrf_name)
log_msg = 'CPS set for VRF:' + vrf_name + 'intf-name:' + name + ' ipv6 accept DAD status:'\
+ str(accept_dad) + 'ret_val:' + str(ret_val)
log_info(log_msg)
if ret_val == -1:
return False
except:
pass
elif af == 'ipv4':
try:
arp_accept = obj.get_attr_data('base-ip/' + af + '/arp-accept')
if arp_accept == 1:
arp_accept = 0
else:
arp_accept = 1
if vrf_name == 'default':
ret_val = _set_proc_variable(_get_proc_ipv4_arp_accept_entry(name), str(arp_accept))
else:
ret_val = dn_base_ip_tool.ipv4_arp_accept_config(name, str(arp_accept), vrf_name)
log_msg = 'CPS set for VRF:' + vrf_name + 'intf-name:' + name + ' ipv4 arp accept status:'\
+ str(arp_accept) + 'ret_val:' + str(ret_val)
log_info(log_msg)
if ret_val == -1:
return False
except:
pass
try:
fwd = obj.get_attr_data('base-ip/' + af + '/forwarding')
if vrf_name == 'default':
ret_val = _set_proc_variable(_get_proc_fwd_entry(name, af), str(fwd))
else:
ret_val = dn_base_ip_tool.ip_forwarding_config(af, name, str(fwd), vrf_name)
log_msg = 'CPS set for VRF:' + vrf_name + 'intf-name:' + name + ' fwd status:' + str(fwd)\
+ 'ret_val:' + str(ret_val)
log_info(log_msg)
if ret_val == -1:
return False
except:
pass
return True
except Exception as e:
log_err("Faild to commit operation exception:%s params:%s"% (e, params))
return False
def ip_unreach_attr(t):
return 'os-icmp-cfg/ip-unreachables-config/input/' + t
def set_ip_unreach_cb(methods, params):
obj = cps_object.CPSObject(obj=params['change'])
if params['operation'] != 'rpc':
log_err('oper is not RPC')
return False
operation = ip_unreach_attr('operation')
enable = ip_unreach_attr('enable')
af = ip_unreach_attr('af')
ifname = ip_unreach_attr('ifname')
vrf_name = ip_unreach_attr('vrf-name')
dev = None
vrf = None
try:
operation = obj.get_attr_data(operation)
af = obj.get_attr_data(af)
enable = obj.get_attr_data(enable)
except ValueError as e:
log_msg = 'Missing mandatory attribute ' + e.args[0]
log_err(log_msg)
return False
    try:
        vrf = obj.get_attr_data(vrf_name)
    except:
        # VRF name is optional; fall back to the default VRF.
        vrf = 'default'
# Operation types
#BASE_CMN_OPERATION_TYPE_CREATE=1
#BASE_CMN_OPERATION_TYPE_DELETE=2
#BASE_CMN_OPERATION_TYPE_UPDATE=3
    is_add = True
if operation == 3:
log_msg = 'Update operation is not supported!'
log_err(log_msg)
return False
elif operation == 2:
        is_add = False
if af != socket.AF_INET and af != socket.AF_INET6:
log_msg = 'Invalid address family' + str(af)
log_err(log_msg)
return False
    try:
        dev = obj.get_attr_data(ifname)
    except:
        log_info('Ifname is not present in the object')
if dn_base_ip_tbl_tool.ip_tables_unreach_rule(vrf, is_add, enable, af, dev):
return True
log_msg = 'Failed to execute IP unreachable request ' + str(is_add) + str(af) \
+ 'enable' + str(enable) + 'ifname' + ifname
log_err(log_msg)
return False
def ip_neigh_flush_attr(t):
return 'base-neighbor/flush/input/' + t
def _create_neigh_flush_ip_and_prefix_from_attr(ip_addr, prefix_len, af):
addr = binascii.unhexlify(ip_addr)
addr = socket.inet_ntop(af, addr)
if prefix_len is not None:
addr = addr + '/' + str(prefix_len)
return addr
def _nbr_flush_handle(vrf_name, af, if_name):
obj = cps_object.CPSObject(module='base-route/nbr-flush')
obj.add_attr("base-route/nbr-flush/input/vrf-name", str(vrf_name))
obj.add_attr("base-route/nbr-flush/input/af", af)
# Incase of leaked VRF neigh flush, this 'dev' wont be present
# in the leaked VRF and hence flush_ip_neigh is expected to fail.
if if_name is not None:
obj.add_attr("base-route/nbr-flush/input/ifname", if_name)
l = []
tr_obj = {'change': obj.get(), 'operation': 'rpc'}
l.append(tr_obj)
return cps.transaction(l)
def flush_ip_neigh_cb(methods, params):
obj = cps_object.CPSObject(obj=params['change'])
if params['operation'] != 'rpc':
log_err('oper is not RPC')
return False
vrf_name = ip_neigh_flush_attr('vrf-name')
af = ip_neigh_flush_attr('af')
ifname = ip_neigh_flush_attr('ifname')
ip_addr = ip_neigh_flush_attr('ip')
prefix_len = ip_neigh_flush_attr('prefix-len')
dev = None
try:
vrf_name = obj.get_attr_data(vrf_name)
except ValueError as e:
log_msg = 'Missing mandatory attribute ' + e.args[0]
log_err(log_msg)
return False
    try:
        af = obj.get_attr_data(af)
    except:
        af = socket.AF_INET
        log_info('Address family is not present in the object')
if af != socket.AF_INET and af != socket.AF_INET6:
log_msg = 'Invalid address family' + str(af)
log_err(log_msg)
return False
    try:
        dev = obj.get_attr_data(ifname)
    except:
        dev = None
        log_info('Ifname is not present in the object')
    try:
        ip_addr = obj.get_attr_data(ip_addr)
    except:
        ip_addr = None
    try:
        prefix_len = obj.get_attr_data(prefix_len)
    except:
        prefix_len = None
addr = None
if ip_addr is not None:
addr = _create_neigh_flush_ip_and_prefix_from_attr(ip_addr, prefix_len, af)
log_msg = 'IP neigh flush request vrf-name:' + str(vrf_name)\
+ ' af:' + str(af) + ' ifname:' + str(dev)\
+ ' to addr:' + str(addr)
log_info(log_msg)
if dev is not None:
for ifname in dev:
if dn_base_ip_tool.is_intf_exist_in_vrf(str(vrf_name), ifname):
val = dn_base_ip_tool.flush_ip_neigh(_ip_af[af], ifname, addr, str(vrf_name))
else:
val = _nbr_flush_handle(str(vrf_name), af, ifname)
else:
val = dn_base_ip_tool.flush_ip_neigh(_ip_af[af], dev, addr, str(vrf_name))
if val is False:
log_err("IP neigh flush on VRF:%s af:%s addr:%s failed"% (str(vrf_name),\
str(af), str(addr)))
val = _nbr_flush_handle(str(vrf_name), af, dev)
return val
def proxy_arp_attr(t):
return 'base-route/proxy-arp-config/' + t
def set_proxy_arp_cb(methods, params):
obj = cps_object.CPSObject(obj=params['change'])
vrf_name = proxy_arp_attr('vrf-name')
ifname = proxy_arp_attr('ifname')
vrf = None
dev = None
try:
vrf = obj.get_attr_data(vrf_name)
dev = obj.get_attr_data(ifname)
except ValueError as e:
log_msg = 'Missing mandatory attribute ' + e.args[0]
log_err(log_msg)
return False
log_info("Proxy ARP configuration on VRF:%s intf:%s operation:%s"% (vrf, dev, params['operation']))
try:
if params['operation'] == 'create':
if dn_base_ip_tool.proxy_arp_config(dev, 1, vrf):
return True
if params['operation'] == 'delete':
if dn_base_ip_tool.proxy_arp_config(dev, 0, vrf):
return True
except Exception as e:
log_err("Faild to commit operation exception:%s params:%s"% (e, params))
log_err("Proxy ARP configuration failed on VRF:%s intf:%s operation:%s"% (vrf, dev, params['operation']))
return False
def sigterm_hdlr(signum, frame):
global shutdown
shutdown = True
if __name__ == '__main__':
shutdown = False
# Install signal handlers.
import signal
signal.signal(signal.SIGTERM, sigterm_hdlr)
if len(sys.argv) > 1:
l = []
_get_ip_objs(cps_object.CPSObject('base-ip/ipv4'), l)
for i in l:
cps_utils.print_obj(i)
sys.exit(1)
handle = cps.obj_init()
d = {}
d['get'] = get_cb
d['transaction'] = trans_cb
for i in _keys.keys():
if i.find('base-ip') == -1:
continue
cps.obj_register(handle, _keys[i], d)
# IPSec Object registration
dn_base_ipsec_utils.obj_reg()
# Set IPSec Authentication and Encryption keys type as string
dn_base_ipsec_utils.add_attr_type()
d = {}
d['transaction'] = set_ip_unreach_cb
cps.obj_register(handle, _ip_unreach_key, d)
log_msg = 'CPS IP unreachable registration done'
log_info(log_msg)
d = {}
d['transaction'] = flush_ip_neigh_cb
cps.obj_register(handle, _ip_neigh_flush_key, d)
log_msg = 'CPS IP neighbor flush registration done'
log_info(log_msg)
d = {}
d['transaction'] = set_proxy_arp_cb
cps.obj_register(handle, _proxy_arp_key, d)
log_msg = 'CPS Proxy ARP registration done'
log_info(log_msg)
#Start interface event handle thread to program LLA into the kernel.
lla_cfg_thread = threading.Thread(target=dn_base_ip_tool.handle_interface_event_for_lla_cfg,\
name="IPv6_Intf_LLA_Cfg")
lla_cfg_thread.setDaemon(True)
lla_cfg_thread.start()
#Start ipv6 address event handle thread to handle the DAD failures
lla_cfg_thread = threading.Thread(target=dn_base_ip_tool.handle_addr_event,\
name="IPv6_Addr_Dad_Handle")
lla_cfg_thread.setDaemon(True)
lla_cfg_thread.start()
# Notify systemd: Daemon is ready
systemd.daemon.notify("READY=1")
# wait until a signal is received
while False == shutdown:
signal.pause()
systemd.daemon.notify("STOPPING=1")
# cleanup code here
# No need to specifically call sys.exit(0).
# That's the default behavior in Python.
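One detail of the handlers above that is worth making concrete (stdlib-only illustration, editorial addition): CPS attributes carry IP addresses as hex strings of the packed network-order bytes. process_ip_line packs them with inet_pton + hexlify, and _create_neigh_flush_ip_and_prefix_from_attr reverses that before appending the prefix length:
import binascii
import socket
packed_hex = binascii.hexlify(socket.inet_pton(socket.AF_INET, "10.1.2.3"))
print(packed_hex)         # '0a010203' on Python 2 (the file's dialect) - what process_ip_line stores
restored = socket.inet_ntop(socket.AF_INET, binascii.unhexlify(packed_hex))
print(restored + "/24")   # '10.1.2.3/24' - the "ip/prefix" string handed to the neighbor flush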
| 32.243119
| 111
| 0.570257
|
819a4783fe0ab411739cdc8339aff340c97c9631
| 6,566
|
py
|
Python
|
third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_unittest.py
|
xzhan96/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2020-07-15T09:50:14.000Z
|
2020-07-15T09:50:14.000Z
|
third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_unittest.py
|
emilio/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_unittest.py
|
emilio/chromium.src
|
1bd0cf3997f947746c0fc5406a2466e7b5f6159e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6
|
2020-09-23T08:56:12.000Z
|
2021-11-18T03:40:49.000Z
|
# Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
class UserTest(unittest.TestCase):
example_user_response = "example user response"
def test_prompt_repeat(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
if not self.repeatsRemaining:
return UserTest.example_user_response
return None
self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining,
raw_input=mock_raw_input), UserTest.example_user_response)
def test_prompt_when_exceeded_repeats(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
return None
self.assertIsNone(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input))
def test_prompt_with_multiple_lists(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_multiple_lists,
args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["3"], "foobar")
run_prompt_test(["4"], "barbaz")
run_prompt_test(["5"], "foobaz")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
def test_prompt_with_list(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_list,
args=["title", ["foo", "bar"]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n 1. foo\n 2. bar\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
def test_confirm(self):
test_cases = (
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
(("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
)
for test_case in test_cases:
expected, inputs = test_case
def mock_raw_input(message):
self.assertEqual(expected[0], message)
return inputs[1]
result = User().confirm(default=inputs[0],
raw_input=mock_raw_input)
self.assertEqual(expected[1], result)
| 49.368421
| 122
| 0.636917
|
f122587ac48e43ff7b7ad178cbcb2de1da883deb
| 3,571
|
py
|
Python
|
saleor/graphql/plugins/resolvers.py
|
csmatar/saleor
|
125c90cc185731b49733178e02c0b43bf5939013
|
[
"CC-BY-4.0"
] | 1
|
2022-02-21T07:17:08.000Z
|
2022-02-21T07:17:08.000Z
|
saleor/graphql/plugins/resolvers.py
|
csmatar/saleor
|
125c90cc185731b49733178e02c0b43bf5939013
|
[
"CC-BY-4.0"
] | 81
|
2021-10-11T04:26:07.000Z
|
2022-03-28T04:46:43.000Z
|
saleor/graphql/plugins/resolvers.py
|
csmatar/saleor
|
125c90cc185731b49733178e02c0b43bf5939013
|
[
"CC-BY-4.0"
] | 1
|
2022-02-16T22:00:59.000Z
|
2022-02-16T22:00:59.000Z
|
from collections import defaultdict
from typing import Dict, List, Tuple
from ...plugins.base_plugin import BasePlugin, ConfigurationTypeField
from .filters import (
filter_plugin_by_type,
filter_plugin_search,
filter_plugin_status_in_channels,
)
from .sorters import sort_plugins
from .types import Plugin
def hide_private_configuration_fields(configuration, config_structure):
if not config_structure:
return
for field in configuration:
name = field["name"]
value = field["value"]
if value is None:
continue
field_type = config_structure.get(name, {}).get("type")
if field_type == ConfigurationTypeField.PASSWORD:
field["value"] = "" if value else None
if field_type in [
ConfigurationTypeField.SECRET,
ConfigurationTypeField.SECRET_MULTILINE,
]:
if not value:
field["value"] = None
elif len(value) > 4:
field["value"] = value[-4:]
else:
field["value"] = value[-1:]
def aggregate_plugins_configuration(
manager,
) -> Tuple[Dict[str, BasePlugin], Dict[str, List[BasePlugin]]]:
plugins_per_channel: Dict[str, List[BasePlugin]] = defaultdict(list)
global_plugins: Dict[str, BasePlugin] = {}
for plugin in manager.all_plugins:
hide_private_configuration_fields(plugin.configuration, plugin.CONFIG_STRUCTURE)
if not getattr(plugin, "CONFIGURATION_PER_CHANNEL", False):
global_plugins[plugin.PLUGIN_ID] = plugin
else:
plugins_per_channel[plugin.PLUGIN_ID].append(plugin)
return global_plugins, plugins_per_channel
def resolve_plugin(id, manager):
global_plugins, plugins_per_channel = aggregate_plugins_configuration(manager)
plugin: BasePlugin = manager.get_plugin(id)
if not plugin:
return None
return Plugin(
id=plugin.PLUGIN_ID,
global_configuration=global_plugins.get(plugin.PLUGIN_ID),
channel_configurations=plugins_per_channel.get(plugin.PLUGIN_ID),
description=plugin.PLUGIN_DESCRIPTION,
name=plugin.PLUGIN_NAME,
)
def resolve_plugins(manager, sort_by=None, **kwargs):
global_plugins, plugins_per_channel = aggregate_plugins_configuration(manager)
plugin_filter = kwargs.get("filter", {})
search_query = plugin_filter.get("search")
filter_status_in_channel = plugin_filter.get("status_in_channels")
filter_plugin_type = plugin_filter.get("type")
plugins = [
Plugin(
id=plugin.PLUGIN_ID,
global_configuration=plugin,
channel_configurations=None,
description=plugin.PLUGIN_DESCRIPTION,
name=plugin.PLUGIN_NAME,
)
for _, plugin in global_plugins.items()
]
plugins.extend(
[
Plugin(
id=plugin_id,
global_configuration=None,
channel_configurations=plugins,
description=plugins[0].PLUGIN_DESCRIPTION,
name=plugins[0].PLUGIN_NAME,
)
for plugin_id, plugins in plugins_per_channel.items()
]
)
if filter_status_in_channel is not None:
plugins = filter_plugin_status_in_channels(plugins, filter_status_in_channel)
if filter_plugin_type is not None:
plugins = filter_plugin_by_type(plugins, filter_plugin_type)
plugins = filter_plugin_search(plugins, search_query)
plugins = sort_plugins(plugins, sort_by)
return plugins
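A small illustration of hide_private_configuration_fields (editorial addition; the sample data is invented, the ConfigurationTypeField constants are the ones imported at the top of this module): password fields are blanked outright, secret fields keep at most their last four characters, and fields not declared in the config structure are left untouched.
configuration = [
    {"name": "api_key", "value": "sk_live_1234567890"},
    {"name": "password", "value": "hunter2"},
    {"name": "timeout", "value": 30},
]
config_structure = {
    "api_key": {"type": ConfigurationTypeField.SECRET},
    "password": {"type": ConfigurationTypeField.PASSWORD},
}
hide_private_configuration_fields(configuration, config_structure)
# api_key  -> "7890"   (only the last four characters survive)
# password -> ""       (blanked, since a value was set)
# timeout  -> 30       (not in config_structure, so untouched)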
| 33.064815
| 88
| 0.66816
|
17e87b19675035acb7788a57875791fc239f91e9
| 4,335
|
py
|
Python
|
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/contracts/testing/test_docstring_parsing.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | 13
|
2018-03-28T23:07:01.000Z
|
2022-03-12T06:01:21.000Z
|
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/contracts/testing/test_docstring_parsing.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | 11
|
2018-06-18T15:49:07.000Z
|
2021-11-25T01:45:33.000Z
|
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/contracts/testing/test_docstring_parsing.py
|
johnson880319/Software
|
045894227f359e0a3a3ec5b7a53f8d1ebc06acdd
|
[
"CC-BY-2.0"
] | 5
|
2018-03-28T23:07:05.000Z
|
2021-12-09T19:02:00.000Z
|
import unittest
from ..docstring_parsing import DocStringInfo, Arg, number_of_spaces
from contracts.interface import add_prefix
examples = {"""
Provides a RGB representation of the values by interpolating the range
[min(value),max(value)] into the colorspace [min_color, max_color].
:param value: The field to represent.
:type value: HxW array
:param max_value: If specified, everything *above* is clipped.
:type max_value: float
:param min_value: If specified, everything *below* is clipped.
:type min_value: float
:param min_color: Color to give to the minimum values.
:return: A RGB image.
:rtype: HxWx3 uint8
:return: gray
""": DocStringInfo(docstring='\n Provides a RGB representation of the values by interpolating the range\n'
' [min(value),max(value)] into the colorspace [min_color, max_color].\n',
params={
'value': Arg('The field to represent.', 'HxW array'),
'max_value': Arg('If specified, everything *above* is clipped.', 'float'),
'min_value': Arg('If specified, everything *below* is clipped.', 'float'),
'min_color': Arg('Color to give to the minimum values.', None),
},
returns=[Arg('A RGB image.', "HxWx3 uint8"), Arg('gray', None)]
)
}
class DocStringTest(unittest.TestCase):
def test_parsing(self):
for string in examples:
parsed = DocStringInfo.parse(string)
"%s" % parsed
"%r" % parsed
result = examples[string]
self.assertEqual(result, parsed)
def test_number_of_spaces(self):
self.assertEqual(number_of_spaces(''), 0)
self.assertEqual(number_of_spaces(' '), 1)
self.assertEqual(number_of_spaces(' '), 2)
self.assertEqual(number_of_spaces('11'), 0)
self.assertEqual(number_of_spaces(' 223'), 1)
self.assertEqual(number_of_spaces(' 4343'), 2)
def test_reparsing(self):
for string, result in examples.items(): #@UnusedVariable
parsed = DocStringInfo.parse(string)
converted = "%s" % parsed
reparsed = DocStringInfo.parse(converted)
msg = ('First string:\n%s\nParsed as:\n%s\n' %
(add_prefix(string, '|'), add_prefix('%r' % parsed, '|')))
msg += ('Converted:\n%s\nReparsed as:\n%s\n' %
(add_prefix(converted, '|'), add_prefix('%r' % reparsed, '|')))
self.assertEqual(parsed, reparsed, msg=msg)
def test_inline_params(self):
def test_inline_parsing(docstring, expected_type="type",
expected_desc="desc"):
info = DocStringInfo.parse(docstring)
self.assertTrue("name" in info.params)
self.assertEqual(info.params["name"].type, expected_type)
self.assertEqual(info.params["name"].desc, expected_desc)
# Proper syntax
test_inline_parsing(":param type name: desc")
test_inline_parsing(":param name: desc", None)
test_inline_parsing(":param name:", None, None)
# Weird syntax for people who like to break things.
test_inline_parsing(" : param type name : desc ")
test_inline_parsing(" : param name : desc ", None)
test_inline_parsing(" : param name : ", None, None)
test_inline_parsing(" : param type , > 0 name : ", "type , > 0", None)
def test_inline_returns(self):
def test_inline_parsing(docstring, expected_type="type",
expected_desc="desc"):
info = DocStringInfo.parse(docstring)
self.assertTrue(len(info.returns) > 0)
self.assertEqual(info.returns[0].type, expected_type)
self.assertEqual(info.returns[0].desc, expected_desc)
# Proper syntax
test_inline_parsing(":returns type: desc")
test_inline_parsing(":returns: desc", None)
test_inline_parsing(":returns:", None, None)
# Weird syntax for people who like to break things.
test_inline_parsing(" : returns type : desc ")
test_inline_parsing(" : returns : desc ", None)
test_inline_parsing(" : returns : ", None, None)
test_inline_parsing(" : returns type , > 0 : ", "type , > 0", None)
| 39.770642
| 117
| 0.610611
|
dc9d593d4487ed0db1bf1e904519fad7cbe322aa
| 16,073
|
py
|
Python
|
widgets/editor_widgets.py
|
cristian64/mkdd-track-editor
|
e8e735033ca11127e6686d81e7377110ad628915
|
[
"MIT"
] | null | null | null |
widgets/editor_widgets.py
|
cristian64/mkdd-track-editor
|
e8e735033ca11127e6686d81e7377110ad628915
|
[
"MIT"
] | null | null | null |
widgets/editor_widgets.py
|
cristian64/mkdd-track-editor
|
e8e735033ca11127e6686d81e7377110ad628915
|
[
"MIT"
] | 2
|
2022-03-30T13:01:26.000Z
|
2022-03-31T06:25:37.000Z
|
import traceback
from io import StringIO
from itertools import chain
from math import acos, pi
import os
import sys
from PyQt5.QtGui import QMouseEvent, QWheelEvent, QPainter, QColor, QFont, QFontMetrics, QPolygon, QImage, QPixmap, QKeySequence
from PyQt5.QtWidgets import (QWidget, QListWidget, QListWidgetItem, QDialog, QMenu, QLineEdit, QFileDialog, QScrollArea,
QMdiSubWindow, QHBoxLayout, QVBoxLayout, QLabel, QPushButton, QTextEdit, QAction, QShortcut)
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import QSize, pyqtSignal, QPoint, QRect
from PyQt5.QtCore import Qt
import PyQt5.QtGui as QtGui
import lib.libbol as libbol
from widgets.data_editor import choose_data_editor
from lib.libbol import get_full_name
def catch_exception(func):
def handle(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
QtWidgets.QApplication.quit()
except:
traceback.print_exc()
#raise
return handle
def catch_exception_with_dialog(func):
def handle(*args, **kwargs):
try:
print(args, kwargs)
return func(*args, **kwargs)
except Exception as e:
traceback.print_exc()
print("hey")
open_error_dialog(str(e), None)
return handle
def catch_exception_with_dialog_nokw(func):
def handle(*args, **kwargs):
try:
print(args, kwargs)
return func(*args, **kwargs)
except Exception as e:
traceback.print_exc()
open_error_dialog(str(e), None)
return handle
def open_error_dialog(errormsg, self):
errorbox = QtWidgets.QMessageBox()
errorbox.critical(self, "Error", errormsg)
errorbox.setFixedSize(500, 200)
class ErrorAnalyzer(QMdiSubWindow):
@catch_exception
def __init__(self, bol, *args, **kwargs):
super().__init__(*args, **kwargs)
font = QFont()
font.setFamily("Consolas")
font.setStyleHint(QFont.Monospace)
font.setFixedPitch(True)
font.setPointSize(10)
self.setWindowTitle("Analysis Results")
self.text_widget = QTextEdit(self)
self.setWidget(self.text_widget)
self.resize(900, 500)
self.setMinimumSize(QSize(300, 300))
self.text_widget.setFont(font)
self.text_widget.setReadOnly(True)
self.analyze_bol_and_write_results(bol)
@catch_exception
def analyze_bol_and_write_results(self, bol):
results = StringIO()
def write_line(line):
results.write(line)
results.write("\n")
# Check enemy point linkage errors
links = {}
for group_index, group in enumerate(bol.enemypointgroups.groups):
for i, point in enumerate(group.points):
if point.link == -1:
continue
if point.link not in links:
links[point.link] = [(group_index, i, point)]
else:
                    links[point.link].append((group_index, i, point))
for link_id, points in links.items():
if len(points) == 1:
group_index, i, point = points[0]
write_line("Point {0} in enemy point group {1} has link {2}; No other point has link {2}".format(
i, group_index, point.link
))
for group_index, group in enumerate(bol.enemypointgroups.groups):
print(group.points[0].link, group.points[-1].link)
if group.points[0].link == -1:
write_line("Start point of enemy point group {0} has no valid link to form a loop".format(group_index))
if group.points[-1].link == -1:
write_line("End point of enemy point group {0} has no valid link to form a loop".format(group_index))
# Check prev/next groups of checkpoints
for i, group in enumerate(bol.checkpoints.groups):
for index in chain(group.prevgroup, group.nextgroup):
if index != -1:
if index < -1 or index+1 > len(bol.checkpoints.groups):
write_line("Checkpoint group {0} has invalid Prev or Nextgroup index {1}".format(
i, index
))
# Validate path id in objects
for object in bol.objects.objects:
if object.pathid < -1 or object.pathid + 1 > len(bol.routes):
write_line("Map object {0} uses path id {1} that does not exist".format(
get_full_name(object.objectid), object.pathid
))
# Validate Kart start positions
if len(bol.kartpoints.positions) == 0:
write_line("Map contains no kart start points")
else:
exist = [False for x in range(8)]
for i, kartstartpos in enumerate(bol.kartpoints.positions):
if kartstartpos.playerid == 0xFF:
if all(exist):
write_line("Duplicate kart start point for all karts")
exist = [True for x in range(8)]
elif kartstartpos.playerid > 8:
write_line("A kart start point with an invalid player id exists: {0}".format(
kartstartpos.playerid
))
elif exist[kartstartpos.playerid]:
write_line("Duplicate kart start point for player id {0}".format(
kartstartpos.playerid))
else:
exist[kartstartpos.playerid] = True
# Check camera indices in areas
for i, area in enumerate(bol.areas.areas):
if area.camera_index < -1 or area.camera_index + 1 > len(bol.cameras):
write_line("Area {0} uses invalid camera index {1}".format(i, area.camera_index))
# Check cameras
for i, camera in enumerate(bol.cameras):
if camera.nextcam < -1 or camera.nextcam + 1 > len(bol.cameras):
write_line("Camera {0} uses invalid nextcam (next camera) index {1}".format(
i, camera.nextcam
))
if camera.route < -1 or camera.route + 1 > len(bol.routes):
write_line("Camera {0} uses invalid path id {1}".format(i,
camera.route))
if len(bol.checkpoints.groups) == 0:
write_line("You need at least one checkpoint group!")
if len(bol.enemypointgroups.groups) == 0:
write_line("You need at least one enemy point group!")
self.check_checkpoints_convex(bol, write_line)
text = results.getvalue()
if not text:
text = "No known common errors detected!"
self.text_widget.setText(text)
def check_checkpoints_convex(self, bol, write_line):
for gindex, group in enumerate(bol.checkpoints.groups):
if len(group.points) > 1:
for i in range(1, len(group.points)):
c1 = group.points[i-1]
c2 = group.points[i]
lastsign = None
for p1, mid, p3 in ((c1.start, c2.start, c2.end),
(c2.start, c2.end, c1.end),
(c2.end, c1.end, c1.start),
(c1.end, c1.start, c2.start)):
side1 = p1 - mid
side2 = p3 - mid
prod = side1.x * side2.z - side2.x * side1.z
if lastsign is None:
lastsign = prod > 0
else:
if not (lastsign == (prod > 0)):
write_line("Quad formed by checkpoints {0} and {1} in checkpoint group {2} isn't convex.".format(
i-1, i, gindex
))
break
class AddPikObjectWindow(QMdiSubWindow):
triggered = pyqtSignal(object)
closing = pyqtSignal()
def closeEvent(self, event):
self.closing.emit()
super().closeEvent(event)
@catch_exception
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "windowtype" in kwargs:
self.window_name = kwargs["windowtype"]
else:
self.window_name = "Add Object"
self.resize(900, 500)
self.setMinimumSize(QSize(300, 300))
self.centralwidget = QWidget(self)
self.setWidget(self.centralwidget)
self.entity = None
font = QFont()
font.setFamily("Consolas")
font.setStyleHint(QFont.Monospace)
font.setFixedPitch(True)
font.setPointSize(10)
self.dummywidget = QWidget(self)
self.dummywidget.setMaximumSize(0,0)
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setAlignment(Qt.AlignTop)
self.verticalLayout.addWidget(self.dummywidget)
self.setup_dropdown_menu()
self.hbox1 = QHBoxLayout()
self.hbox2 = QHBoxLayout()
self.label1 = QLabel(self.centralwidget)
self.label2 = QLabel(self.centralwidget)
self.label3 = QLabel(self.centralwidget)
self.label1.setText("Group")
self.label2.setText("Position in Group")
self.label3.setText("(-1 means end of Group)")
self.group_edit = QLineEdit(self.centralwidget)
self.position_edit = QLineEdit(self.centralwidget)
self.group_edit.setValidator(QtGui.QIntValidator(0, 2**31-1))
self.position_edit.setValidator(QtGui.QIntValidator(-1, 2**31-1))
self.hbox1.setAlignment(Qt.AlignRight)
self.hbox2.setAlignment(Qt.AlignRight)
self.verticalLayout.addLayout(self.hbox1)
self.verticalLayout.addLayout(self.hbox2)
self.hbox1.addWidget(self.label1)
self.hbox1.addWidget(self.group_edit)
self.hbox2.addWidget(self.label2)
self.hbox2.addWidget(self.position_edit)
self.hbox2.addWidget(self.label3)
self.group_edit.setDisabled(True)
self.position_edit.setDisabled(True)
self.editor_widget = None
self.editor_layout = QScrollArea()#QVBoxLayout(self.centralwidget)
self.verticalLayout.addWidget(self.editor_layout)
#self.textbox_xml = QTextEdit(self.centralwidget)
self.button_savetext = QPushButton(self.centralwidget)
self.button_savetext.setText("Add Object")
self.button_savetext.setToolTip("Hotkey: Ctrl+S")
self.button_savetext.setMaximumWidth(400)
self.button_savetext.setDisabled(True)
self.verticalLayout.addWidget(self.button_savetext)
self.setWindowTitle(self.window_name)
self.created_object = None
#QtWidgets.QShortcut(Qt.CTRL + Qt.Key_S, self).activated.connect(self.emit_add_object)
def keyPressEvent(self, event: QtGui.QKeyEvent):
        if event.key() == Qt.Key_S and event.modifiers() & Qt.ControlModifier:
self.emit_add_object()
else:
super().keyPressEvent(event)
def emit_add_object(self):
self.button_savetext.pressed.emit()
def get_content(self):
try:
if not self.group_edit.text():
group = None
else:
group = int(self.group_edit.text())
if not self.position_edit.text():
position = None
else:
position = int(self.position_edit.text())
return self.created_object, group, position
except Exception as e:
traceback.print_exc()
open_error_dialog(str(e), self)
return None
def setup_dropdown_menu(self):
self.category_menu = QtWidgets.QComboBox(self)
self.category_menu.addItem("-- select type --")
self.verticalLayout.addWidget(self.category_menu)
self.objecttypes = {
"Enemy Point": libbol.EnemyPoint,
"Checkpoint": libbol.Checkpoint,
"Object Point": libbol.RoutePoint,
"Object": libbol.MapObject,
"Area": libbol.Area,
"Camera": libbol.Camera,
"Respawn Point": libbol.JugemPoint,
"Kart Start Point": libbol.KartStartPoint,
"Enemy Path": libbol.EnemyPointGroup,
"Checkpoint Group": libbol.CheckpointGroup,
"Object Path": libbol.Route,
"Light Param": libbol.LightParam,
"Minigame Param": libbol.MGEntry
}
for item, val in self.objecttypes.items():
self.category_menu.addItem(item)
self.category_menu.currentIndexChanged.connect(self.change_category)
def change_category(self, index):
if index > 0:
item = self.category_menu.currentText()
self.button_savetext.setDisabled(False)
objecttype = self.objecttypes[item]
if self.editor_widget is not None:
self.editor_widget.deleteLater()
self.editor_widget = None
if self.created_object is not None:
del self.created_object
self.created_object = objecttype.new()
if isinstance(self.created_object, (libbol.Checkpoint, libbol.EnemyPoint, libbol.RoutePoint)):
self.group_edit.setDisabled(False)
self.position_edit.setDisabled(False)
self.group_edit.setText("0")
self.position_edit.setText("-1")
else:
self.group_edit.setDisabled(True)
self.position_edit.setDisabled(True)
self.group_edit.clear()
self.position_edit.clear()
data_editor = choose_data_editor(self.created_object)
if data_editor is not None:
self.editor_widget = data_editor(self, self.created_object)
self.editor_layout.setWidget(self.editor_widget)
self.editor_widget.update_data()
else:
self.editor_widget.deleteLater()
self.editor_widget = None
del self.created_object
self.created_object = None
self.button_savetext.setDisabled(True)
self.position_edit.setDisabled(True)
self.group_edit.setDisabled(True)
class SpawnpointEditor(QMdiSubWindow):
triggered = pyqtSignal(object)
closing = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.centralwidget = QWidget(self)
self.setWidget(self.centralwidget)
self.entity = None
self.resize(400, 200)
font = QFont()
font.setFamily("Consolas")
font.setStyleHint(QFont.Monospace)
font.setFixedPitch(True)
font.setPointSize(10)
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.position = QLineEdit(self.centralwidget)
self.rotation = QLineEdit(self.centralwidget)
self.button_savetext = QPushButton(self.centralwidget)
self.button_savetext.setText("Set Data")
self.button_savetext.setMaximumWidth(400)
self.verticalLayout.addWidget(QLabel("startPos"))
self.verticalLayout.addWidget(self.position)
self.verticalLayout.addWidget(QLabel("startDir"))
self.verticalLayout.addWidget(self.rotation)
self.verticalLayout.addWidget(self.button_savetext)
self.setWindowTitle("Edit startPos/Dir")
def closeEvent(self, event):
self.closing.emit()
def get_pos_dir(self):
pos = self.position.text().strip()
direction = float(self.rotation.text().strip())
if "," in pos:
pos = [float(x.strip()) for x in pos.split(",")]
else:
pos = [float(x.strip()) for x in pos.split(" ")]
assert len(pos) == 3
return pos, direction
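The convexity pass in ErrorAnalyzer.check_checkpoints_convex rests on one geometric fact: walking the four corners of the quad formed by two consecutive checkpoints, the z-component of the cross product of the two edges meeting at each corner must keep a single sign; a sign flip means a corner caves in. A standalone restatement on plain (x, z) tuples (editorial addition, runnable without the editor's vector classes):
def is_convex_quad(quad):
    # quad: four (x, z) corners in order; mirrors the sign test used above
    signs = []
    for i in range(len(quad)):
        p1, mid, p3 = quad[i - 1], quad[i], quad[(i + 1) % len(quad)]
        side1 = (p1[0] - mid[0], p1[1] - mid[1])
        side2 = (p3[0] - mid[0], p3[1] - mid[1])
        signs.append(side1[0] * side2[1] - side2[0] * side1[1] > 0)
    return all(signs) or not any(signs)
print(is_convex_quad([(0, 0), (4, 0), (4, 4), (0, 4)]))   # True  (square)
print(is_convex_quad([(0, 0), (4, 0), (1, 1), (0, 4)]))   # False (corner (1, 1) caves in)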
| 36.696347
| 129
| 0.590618
|
12bbcee588e9835228ea369f91c9ba092a06efb0
| 5,190
|
py
|
Python
|
lib/spack/spack/parse.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
lib/spack/spack/parse.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
lib/spack/spack/parse.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
import shlex
import sys
import itertools
from six import string_types
import spack.error
class Token(object):
"""Represents tokens; generated from input by lexer and fed to parse()."""
def __init__(self, type, value='', start=0, end=0):
self.type = type
self.value = value
self.start = start
self.end = end
def __repr__(self):
return str(self)
def __str__(self):
return "<%d: '%s'>" % (self.type, self.value)
def is_a(self, type):
return self.type == type
def __eq__(self, other):
return (self.type == other.type) and (self.value == other.value)
class Lexer(object):
"""Base class for Lexers that keep track of line numbers."""
def __init__(self, lexicon0, mode_switches_01=[],
lexicon1=[], mode_switches_10=[]):
self.scanner0 = re.Scanner(lexicon0)
self.mode_switches_01 = mode_switches_01
self.scanner1 = re.Scanner(lexicon1)
self.mode_switches_10 = mode_switches_10
self.mode = 0
def token(self, type, value=''):
if self.mode == 0:
return Token(type, value,
self.scanner0.match.start(0),
self.scanner0.match.end(0))
else:
return Token(type, value,
self.scanner1.match.start(0),
self.scanner1.match.end(0))
def lex_word(self, word):
scanner = self.scanner0
mode_switches = self.mode_switches_01
if self.mode == 1:
scanner = self.scanner1
mode_switches = self.mode_switches_10
tokens, remainder = scanner.scan(word)
remainder_used = 0
for i, t in enumerate(tokens):
if t.type in mode_switches:
# Combine post-switch tokens with remainder and
# scan in other mode
self.mode = 1 - self.mode # swap 0/1
remainder_used = 1
tokens = tokens[:i + 1] + self.lex_word(
word[word.index(t.value) + len(t.value):])
break
if remainder and not remainder_used:
raise LexError("Invalid character", word, word.index(remainder))
return tokens
def lex(self, text):
lexed = []
for word in text:
tokens = self.lex_word(word)
lexed.extend(tokens)
return lexed
class Parser(object):
"""Base class for simple recursive descent parsers."""
def __init__(self, lexer):
self.tokens = iter([]) # iterators over tokens, handled in order.
self.token = Token(None) # last accepted token
self.next = None # next token
self.lexer = lexer
self.text = None
def gettok(self):
"""Puts the next token in the input stream into self.next."""
try:
self.next = next(self.tokens)
except StopIteration:
self.next = None
def push_tokens(self, iterable):
"""Adds all tokens in some iterable to the token stream."""
self.tokens = itertools.chain(
iter(iterable), iter([self.next]), self.tokens)
self.gettok()
def accept(self, id):
"""Put the next symbol in self.token if accepted, then call gettok()"""
if self.next and self.next.is_a(id):
self.token = self.next
self.gettok()
return True
return False
def next_token_error(self, message):
"""Raise an error about the next token in the stream."""
raise ParseError(message, self.text, self.token.end)
def last_token_error(self, message):
"""Raise an error about the previous token in the stream."""
raise ParseError(message, self.text, self.token.start)
def unexpected_token(self):
self.next_token_error("Unexpected token: '%s'" % self.next.value)
def expect(self, id):
"""Like accept(), but fails if we don't like the next token."""
if self.accept(id):
return True
else:
if self.next:
self.unexpected_token()
else:
self.next_token_error("Unexpected end of input")
sys.exit(1)
def setup(self, text):
if isinstance(text, string_types):
text = shlex.split(str(text))
self.text = text
self.push_tokens(self.lexer.lex(text))
def parse(self, text):
self.setup(text)
return self.do_parse()
class ParseError(spack.error.SpackError):
"""Raised when we don't hit an error while parsing."""
def __init__(self, message, string, pos):
super(ParseError, self).__init__(message)
self.string = string
self.pos = pos
class LexError(ParseError):
"""Raised when we don't know how to lex something."""
def __init__(self, message, string, pos):
super(LexError, self).__init__(message, string, pos)
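A toy subclass sketch (editorial addition; the token ids and grammar are invented, spack's real spec lexer lives in another module) showing how a concrete lexer hands re.Scanner matches to Lexer.token() and how lex() consumes a list of already-split words:
ID, EQ, NUM = range(3)
class KeyValueLexer(Lexer):
    def __init__(self):
        super(KeyValueLexer, self).__init__([
            (r'[A-Za-z_]\w*', lambda scanner, val: self.token(ID, val)),
            (r'\d+',          lambda scanner, val: self.token(NUM, val)),
            (r'=',            lambda scanner, val: self.token(EQ, val)),
        ])
print(KeyValueLexer().lex(['threads=4']))   # [<0: 'threads'>, <1: '='>, <2: '4'>]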
| 30.710059
| 79
| 0.587476
|
14788ea83accb89ed904d08bb3dc3fff212b6b7e
| 68
|
py
|
Python
|
demo/demo_app/settings/production_debug.py
|
Nekmo/djangocms-bs3-theme
|
1155588414164d6e5d027131e9181856f8a80d5d
|
[
"MIT"
] | null | null | null |
demo/demo_app/settings/production_debug.py
|
Nekmo/djangocms-bs3-theme
|
1155588414164d6e5d027131e9181856f8a80d5d
|
[
"MIT"
] | 10
|
2018-07-30T15:09:57.000Z
|
2022-03-29T21:54:12.000Z
|
demo/demo_app/settings/production_debug.py
|
Nekmo/djangocms-bs3-theme
|
1155588414164d6e5d027131e9181856f8a80d5d
|
[
"MIT"
] | null | null | null |
from .production import *
DEBUG = True
BOOTSTRAP3_FORCE_SRC = True
| 13.6
| 27
| 0.779412
|
5e62f5982af991c7a8f2f59da86f92fa09375a81
| 215
|
py
|
Python
|
yeast/core/media/sc/sc_no_ino.py
|
irahorecka/sga-fba
|
fc7e923da8e79555780359f018c85b5e5339d8d0
|
[
"MIT"
] | null | null | null |
yeast/core/media/sc/sc_no_ino.py
|
irahorecka/sga-fba
|
fc7e923da8e79555780359f018c85b5e5339d8d0
|
[
"MIT"
] | null | null | null |
yeast/core/media/sc/sc_no_ino.py
|
irahorecka/sga-fba
|
fc7e923da8e79555780359f018c85b5e5339d8d0
|
[
"MIT"
] | null | null | null |
"""
Defines upper bounds of SC-Ino media for FBA
"""
from yeast.core.media.constants import reagents
from yeast.core.media.sc.base import sc
ino = {
reagents["myo-inositol"]: 0.0,
}
sc_no_ino = {**sc, **ino}
| 16.538462
| 47
| 0.683721
|
1aae5f06ddd45b14327ba65dfe9f9395c72a46b5
| 7,945
|
py
|
Python
|
docs/conf.py
|
kforti/variant_calling
|
5b6899115b1c5b1f958d8d8f33b484ccd0a8a787
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
kforti/variant_calling
|
5b6899115b1c5b1f958d8d8f33b484ccd0a8a787
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
kforti/variant_calling
|
5b6899115b1c5b1f958d8d8f33b484ccd0a8a787
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# tokyo_variant_calling documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tokyo_variant_calling'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tokyo_variant_callingdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'tokyo_variant_calling.tex',
u'tokyo_variant_calling Documentation',
u"Kevin Fortier (Pai Lab)", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tokyo_variant_calling', u'tokyo_variant_calling Documentation',
[u"Kevin Fortier (Pai Lab)"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tokyo_variant_calling', u'tokyo_variant_calling Documentation',
u"Kevin Fortier (Pai Lab)", 'tokyo_variant_calling',
'Reproducing variant calling figures from Tokyo group', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.428571
| 80
| 0.711517
|
3bc5db5029e791619f4fdd2de2bffba75d058601
| 1,933
|
py
|
Python
|
OpenCv Document Scanner/scan.py
|
PatrickPrakash/OpenCvProjects
|
c29d024a86ea8b2fc5a5aad8b8eb1f128e68b931
|
[
"MIT"
] | null | null | null |
OpenCv Document Scanner/scan.py
|
PatrickPrakash/OpenCvProjects
|
c29d024a86ea8b2fc5a5aad8b8eb1f128e68b931
|
[
"MIT"
] | null | null | null |
OpenCv Document Scanner/scan.py
|
PatrickPrakash/OpenCvProjects
|
c29d024a86ea8b2fc5a5aad8b8eb1f128e68b931
|
[
"MIT"
] | null | null | null |
from pyimagesearch.transform import four_point_transform
from skimage.filters import threshold_local
import numpy as np
import argparse
import cv2
import imutils
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
help = "Path to the image to be scanned")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# print("STEP 1: Edge Detection")
# cv2.imshow("Image",image)
# cv2.imshow("Edged",edged)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
cnts = cv2.findContours(edged.copy() , cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
screenCnt = None
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
if screenCnt is None:
    raise RuntimeError("Could not find a four-point document outline in the image")
# show the contour (outline) of the piece of paper
# print("STEP 2: Find contours of paper")
# cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
# cv2.imshow("Outline", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
T = threshold_local(warped, 11, offset = 10, method = "gaussian")
warped = (warped > T).astype("uint8") * 255
# show the original and scanned images
print("STEP 3: Apply perspective transform")
cv2.imshow("Original", imutils.resize(orig, height = 650))
cv2.imshow("Scanned", imutils.resize(warped, height = 650))
cv2.waitKey(0)
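# A minimal usage sketch (assumes this file is saved as scan.py and that the
# image path below exists; both names are illustrative only):
#
#   python scan.py --image images/receipt.jpg
#
# The script prints "STEP 3: Apply perspective transform" and opens two windows
# with the original photo and the top-down, thresholded "scanned" result.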
| 33.327586
| 78
| 0.727367
|
8a6ad7eca5917d66fc8e3593df4811aa3731e665
| 15,400
|
py
|
Python
|
backend/src/baserow/api/groups/invitations/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 839
|
2020-07-20T13:29:34.000Z
|
2022-03-31T21:09:16.000Z
|
backend/src/baserow/api/groups/invitations/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 28
|
2020-08-07T09:23:58.000Z
|
2022-03-01T22:32:40.000Z
|
backend/src/baserow/api/groups/invitations/views.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | 79
|
2020-08-04T01:48:01.000Z
|
2022-03-27T13:30:54.000Z
|
from django.db import transaction
from django.db.models import Exists, OuterRef
from django.contrib.auth import get_user_model
from itsdangerous.exc import BadSignature
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny
from drf_spectacular.utils import extend_schema
from drf_spectacular.openapi import OpenApiParameter, OpenApiTypes
from baserow.api.decorators import validate_body, map_exceptions
from baserow.api.errors import (
ERROR_USER_NOT_IN_GROUP,
ERROR_USER_INVALID_GROUP_PERMISSIONS,
ERROR_GROUP_DOES_NOT_EXIST,
ERROR_HOSTNAME_IS_NOT_ALLOWED,
BAD_TOKEN_SIGNATURE,
)
from baserow.api.schemas import get_error_schema
from baserow.api.groups.serializers import GroupUserGroupSerializer
from baserow.api.groups.users.errors import ERROR_GROUP_USER_ALREADY_EXISTS
from baserow.api.groups.invitations.errors import (
ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
ERROR_GROUP_INVITATION_EMAIL_MISMATCH,
)
from baserow.core.models import GroupInvitation
from baserow.core.handler import CoreHandler
from baserow.core.exceptions import (
UserNotInGroup,
UserInvalidGroupPermissionsError,
GroupDoesNotExist,
GroupInvitationDoesNotExist,
BaseURLHostnameNotAllowed,
GroupInvitationEmailMismatch,
GroupUserAlreadyExists,
)
from .serializers import (
GroupInvitationSerializer,
CreateGroupInvitationSerializer,
UpdateGroupInvitationSerializer,
UserGroupInvitationSerializer,
)
User = get_user_model()
class GroupInvitationsView(APIView):
permission_classes = (IsAuthenticated,)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Returns only invitations that are in the group related "
"to the provided value.",
)
],
tags=["Group invitations"],
operation_id="list_group_invitations",
description=(
"Lists all the group invitations of the group related to the provided "
"`group_id` parameter if the authorized user has admin rights to that "
"group."
),
responses={
200: GroupInvitationSerializer(many=True),
400: get_error_schema(
["ERROR_USER_NOT_IN_GROUP", "ERROR_USER_INVALID_GROUP_PERMISSIONS"]
),
404: get_error_schema(["ERROR_GROUP_DOES_NOT_EXIST"]),
},
)
@map_exceptions(
{
GroupDoesNotExist: ERROR_GROUP_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
UserInvalidGroupPermissionsError: ERROR_USER_INVALID_GROUP_PERMISSIONS,
}
)
def get(self, request, group_id):
"""Lists all the invitations of the provided group id."""
group = CoreHandler().get_group(group_id)
group.has_user(request.user, "ADMIN", raise_error=True)
group_invitations = GroupInvitation.objects.filter(group=group)
serializer = GroupInvitationSerializer(group_invitations, many=True)
return Response(serializer.data)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Creates a group invitation to the group related to the "
"provided value.",
)
],
tags=["Group invitations"],
operation_id="create_group_invitation",
description=(
"Creates a new group invitations for an email address if the authorized "
"user has admin rights to the related group. An email containing a sign "
"up link will be send to the user."
),
request=CreateGroupInvitationSerializer,
responses={
200: GroupInvitationSerializer,
400: get_error_schema(
[
"ERROR_USER_NOT_IN_GROUP",
"ERROR_USER_INVALID_GROUP_PERMISSIONS",
"ERROR_REQUEST_BODY_VALIDATION",
]
),
404: get_error_schema(["ERROR_GROUP_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@validate_body(CreateGroupInvitationSerializer)
@map_exceptions(
{
GroupDoesNotExist: ERROR_GROUP_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
UserInvalidGroupPermissionsError: ERROR_USER_INVALID_GROUP_PERMISSIONS,
GroupUserAlreadyExists: ERROR_GROUP_USER_ALREADY_EXISTS,
BaseURLHostnameNotAllowed: ERROR_HOSTNAME_IS_NOT_ALLOWED,
}
)
def post(self, request, data, group_id):
"""Creates a new group invitation and sends it the provided email."""
group = CoreHandler().get_group(group_id)
group_invitation = CoreHandler().create_group_invitation(
request.user, group, **data
)
return Response(GroupInvitationSerializer(group_invitation).data)
class GroupInvitationView(APIView):
permission_classes = (IsAuthenticated,)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_invitation_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Returns the group invitation related to the provided "
"value.",
)
],
tags=["Group invitations"],
operation_id="get_group_invitation",
description=(
"Returns the requested group invitation if the authorized user has admin "
"right to the related group"
),
responses={
200: GroupInvitationSerializer,
400: get_error_schema(
["ERROR_USER_NOT_IN_GROUP", "ERROR_USER_INVALID_GROUP_PERMISSIONS"]
),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@map_exceptions(
{
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
UserInvalidGroupPermissionsError: ERROR_USER_INVALID_GROUP_PERMISSIONS,
}
)
def get(self, request, group_invitation_id):
"""Selects a single group invitation and responds with a serialized version."""
group_invitation = CoreHandler().get_group_invitation(group_invitation_id)
group_invitation.group.has_user(request.user, "ADMIN", raise_error=True)
return Response(GroupInvitationSerializer(group_invitation).data)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_invitation_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Updates the group invitation related to the provided "
"value.",
)
],
tags=["Group invitations"],
operation_id="update_group_invitation",
description=(
"Updates the existing group invitation related to the provided "
"`group_invitation_id` param if the authorized user has admin rights to "
"the related group."
),
request=UpdateGroupInvitationSerializer,
responses={
200: GroupInvitationSerializer,
400: get_error_schema(
[
"ERROR_USER_NOT_IN_GROUP",
"ERROR_USER_INVALID_GROUP_PERMISSIONS",
"ERROR_REQUEST_BODY_VALIDATION",
]
),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@validate_body(UpdateGroupInvitationSerializer)
@map_exceptions(
{
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
UserInvalidGroupPermissionsError: ERROR_USER_INVALID_GROUP_PERMISSIONS,
}
)
def patch(self, request, data, group_invitation_id):
"""Updates the group invitation if the user belongs to the group."""
group_invitation = CoreHandler().get_group_invitation(
group_invitation_id,
base_queryset=GroupInvitation.objects.select_for_update(),
)
group_invitation = CoreHandler().update_group_invitation(
request.user, group_invitation, **data
)
return Response(GroupInvitationSerializer(group_invitation).data)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_invitation_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Deletes the group invitation related to the provided "
"value.",
)
],
tags=["Group invitations"],
operation_id="delete_group_invitation",
description=(
"Deletes a group invitation if the authorized user has admin rights to "
"the related group."
),
responses={
204: None,
400: get_error_schema(
["ERROR_USER_NOT_IN_GROUP", "ERROR_USER_INVALID_GROUP_PERMISSIONS"]
),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@map_exceptions(
{
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
UserInvalidGroupPermissionsError: ERROR_USER_INVALID_GROUP_PERMISSIONS,
}
)
def delete(self, request, group_invitation_id):
"""Deletes an existing group_invitation if the user belongs to the group."""
group_invitation = CoreHandler().get_group_invitation(
group_invitation_id,
base_queryset=GroupInvitation.objects.select_for_update(),
)
CoreHandler().delete_group_invitation(request.user, group_invitation)
return Response(status=204)
class AcceptGroupInvitationView(APIView):
permission_classes = (IsAuthenticated,)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_invitation_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Accepts the group invitation related to the provided "
"value.",
)
],
tags=["Group invitations"],
operation_id="accept_group_invitation",
description=(
"Accepts a group invitation with the given id if the email address of the "
"user matches that of the invitation."
),
request=None,
responses={
200: GroupUserGroupSerializer,
400: get_error_schema(["ERROR_GROUP_INVITATION_EMAIL_MISMATCH"]),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@map_exceptions(
{
GroupInvitationEmailMismatch: ERROR_GROUP_INVITATION_EMAIL_MISMATCH,
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
}
)
def post(self, request, group_invitation_id):
"""Accepts a group invitation."""
try:
group_invitation = GroupInvitation.objects.select_related("group").get(
id=group_invitation_id
)
except GroupInvitation.DoesNotExist:
raise GroupInvitationDoesNotExist(
f"The group invitation with id {group_invitation_id} does not exist."
)
group_user = CoreHandler().accept_group_invitation(
request.user, group_invitation
)
return Response(GroupUserGroupSerializer(group_user).data)
class RejectGroupInvitationView(APIView):
permission_classes = (IsAuthenticated,)
@extend_schema(
parameters=[
OpenApiParameter(
name="group_invitation_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Rejects the group invitation related to the provided "
"value.",
)
],
tags=["Group invitations"],
operation_id="reject_group_invitation",
description=(
"Rejects a group invitation with the given id if the email address of the "
"user matches that of the invitation."
),
request=None,
responses={
204: None,
400: get_error_schema(["ERROR_GROUP_INVITATION_EMAIL_MISMATCH"]),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@map_exceptions(
{
GroupInvitationEmailMismatch: ERROR_GROUP_INVITATION_EMAIL_MISMATCH,
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
}
)
def post(self, request, group_invitation_id):
"""Rejects a group invitation."""
try:
group_invitation = GroupInvitation.objects.select_related("group").get(
id=group_invitation_id
)
except GroupInvitation.DoesNotExist:
raise GroupInvitationDoesNotExist(
f"The group invitation with id {group_invitation_id} does not exist."
)
CoreHandler().reject_group_invitation(request.user, group_invitation)
return Response(status=204)
class GroupInvitationByTokenView(APIView):
permission_classes = (AllowAny,)
@extend_schema(
parameters=[
OpenApiParameter(
name="token",
location=OpenApiParameter.PATH,
type=OpenApiTypes.STR,
description="Returns the group invitation related to the provided "
"token.",
)
],
tags=["Group invitations"],
operation_id="get_group_invitation_by_token",
description=(
"Responds with the serialized group invitation if an invitation with the "
"provided token is found."
),
responses={
200: UserGroupInvitationSerializer,
400: get_error_schema(["BAD_TOKEN_SIGNATURE"]),
404: get_error_schema(["ERROR_GROUP_INVITATION_DOES_NOT_EXIST"]),
},
)
@map_exceptions(
{
BadSignature: BAD_TOKEN_SIGNATURE,
GroupInvitationDoesNotExist: ERROR_GROUP_INVITATION_DOES_NOT_EXIST,
}
)
def get(self, request, token):
"""
Responds with the serialized group invitation if an invitation with the
provided token is found.
"""
exists_queryset = User.objects.filter(username=OuterRef("email"))
group_invitation = CoreHandler().get_group_invitation_by_token(
token,
base_queryset=GroupInvitation.objects.annotate(
email_exists=Exists(exists_queryset)
),
)
return Response(UserGroupInvitationSerializer(group_invitation).data)
| 36.065574
| 87
| 0.637792
|
07b92d615923dd63cf15c6084f546c4601927de3
| 17,896
|
py
|
Python
|
data/dql/script47/20210129-091207/script47.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | 1
|
2020-06-14T13:50:28.000Z
|
2020-06-14T13:50:28.000Z
|
data/dql/script47/20210129-091207/script47.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
data/dql/script47/20210129-091207/script47.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
# Similar to script .
# Uses CompleteEnvironment10dB
# Centralized Learning-Distributed Execution
# Simulates many times, for different number of agents, and take the averages.
# There are different channels to the BS and to the devices.
# Multiple episodes convergence. Everything is in dB.
# One NN is trained and copied to each agent.
from shutil import copyfile
from sys_simulator.general import make_dir_timestamp, save_with_pickle
import matplotlib.pyplot as plt
from sys_simulator.plots import plot_positions_actions_pie
from time import time
from sys_simulator.general import db_to_power, power_to_db
from sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel
from sys_simulator import general as gen
from sys_simulator.q_learning.environments.completeEnvironment10dB \
import CompleteEnvironment10dB
from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent
from sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework
from sys_simulator.parameters.parameters import \
EnvironmentParameters, TrainingParameters, DQNAgentParameters
from sys_simulator.q_learning.rewards import dis_reward_tensor_db
from copy import deepcopy
import torch
import numpy as np
import pickle
n_mues = 1 # number of mues
n_d2d = 2 # number of d2d pairs
n_rb = n_mues # number of RBs
carrier_frequency = 2.4 # carrier frequency in GHz
bs_radius = 500 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 50 # d2d pair distance in m
device_height = 1.5 # mobile devices height in m
bs_height = 25 # BS antenna height in m
p_max = 40 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_train = 6 # mue sinr threshold in dB for training
mue_margin = 200 # mue margin in dB
# conversions from dBm to dB
p_max = p_max - 30
noise_power = noise_power - 30
# channel parameters
CHANNEL_RND = True
# q-learning parameters
# training
NUMBER = 1
# exec params
STEPS_PER_EPISODE = 5
TEST_STEPS_PER_EPISODE = 5
MAX_NUM_EPISODES = 1500 # medium training
ITERATIONS_PER_NUM_AGENTS = 100
EVAL_EVERY = 80
EVAL_NUM_EPISODES = 100
EVAL_STEPS_PER_EPISODE = 5
# debug params
# STEPS_PER_EPISODE = 2
# TEST_STEPS_PER_EPISODE = 2
# MAX_NUM_EPISODES = 10
# ITERATIONS_PER_NUM_AGENTS = 10
# EVAL_EVERY = 1000
# EVAL_NUM_EPISODES = 2
# EVAL_STEPS_PER_EPISODE = 2
# common
EPSILON_INITIAL = 1
EPSILON_MIN = .05
# EPSILON_DECAY = .9*1e-4 # medium training
EPSILON_DECAY = 1.3 / (MAX_NUM_EPISODES * STEPS_PER_EPISODE) # medium training
GAMMA = 0.9 # Discount factor
C = 1 # C constant for the improved reward function
TARGET_UPDATE = 10
REPLAY_MEMORY_SIZE = 10000
BATCH_SIZE = 64
HIDDEN_SIZE = 128
NUM_HIDDEN_LAYERS = 1
LEARNING_RATE = 1e-2
REWARD_PENALTY = 1.5
ENVIRONMENT_MEMORY = 2
MAX_NUMBER_OF_AGENTS = 5
max_d2d = MAX_NUMBER_OF_AGENTS
range_n_d2d = range(1, max_d2d + 1, 1)
# more parameters
# linear discretization
# actions = power_to_db(np.linspace(
# db_to_power(p_max-20), db_to_power(p_max-10), 10
# ))
# db discretization
actions = power_to_db(
np.linspace(
1e-6, db_to_power(p_max-10), 10
)
)
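# Rough sanity check of the discretization above (assuming power_to_db is the
# usual 10*log10 conversion): after the dBm-to-dB shift p_max is 10 dBW, so
# db_to_power(p_max - 10) equals 1 W and the ten action levels run from about
# -60 dBW (for 1e-6 W) up to 0 dBW, with the dB spacing getting finer towards
# the top of the range because the underlying grid is linear in watts.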
env_params = EnvironmentParameters(
rb_bandwidth, d2d_pair_distance, p_max, noise_power,
bs_gain, user_gain, sinr_threshold_train,
n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
)
params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(
EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,
BATCH_SIZE, GAMMA
)
reward_function = dis_reward_tensor_db
channel_to_devices = BANChannel(rnd=CHANNEL_RND)
channel_to_bs = UrbanMacroNLOSWinnerChannel(
rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height
)
ref_env = CompleteEnvironment10dB(
env_params,
reward_function,
channel_to_bs,
channel_to_devices,
reward_penalty=REWARD_PENALTY,
memory=ENVIRONMENT_MEMORY,
bs_height=bs_height
)
# foo env and foo agents stuff
foo_env = deepcopy(ref_env)
foo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]
foo_env.build_scenario(foo_agents)
_, _ = foo_env.step(foo_agents)
env_state_size = foo_env.get_state_size(foo_agents[0])
def train(start):
global actions
framework = ExternalDQNFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
NUM_HIDDEN_LAYERS,
LEARNING_RATE
)
best_reward = float('-inf')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
# aux_range = range(max_d2d+1)[1:]
epsilon = agent_params.start_epsilon
for episode in range(MAX_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
now = (time() - start) / 60
print(
'Training. ' +
f'Number of agents: {n_agents}. ' +
f'Episode: {episode}/{MAX_NUM_EPISODES-1}. ' +
f'Epsilon: {epsilon}. '
f'Elapsed time: {now} minutes.'
)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
for a in agents:
a.set_epsilon(epsilon)
env.build_scenario(agents)
obs, _ = env.step(agents)
total_reward = 0.0
i = 0
bag = list()
while True:
if i >= params.steps_per_episode:
break
else:
past_actions = torch.zeros([len(agents)], device=device)
for j, agent in enumerate(agents):
agent.get_action(framework, obs[j].float())
past_actions[j] = agent.action_index
# # debugging
# if len(agents) == 2:
# print('debugging')
# aux1 = agents[0].action_index == 9
# aux2 = agents[1].action_index == 5
# aux = [aux1, aux2]
# if np.mean(aux) == 1:
# print('debugging')
next_obs, rewards = env.step(agents)
i += 1
for j, agent in enumerate(agents):
framework.replay_memory.push(
obs[j].float(), past_actions[j],
next_obs[j].float(), rewards[j]
)
framework.learn()
total_reward = np.sum(rewards)
bag.append(total_reward.item())
obs = next_obs
if i % TARGET_UPDATE == 0:
framework.target_net.load_state_dict(
framework.policy_net.state_dict()
)
if total_reward > best_reward:
best_reward = total_reward
epsilon = agents[0].epsilon
if episode % EVAL_EVERY == 0:
r, d_speff, m_speff = in_training_test(framework, device)
rewards_bag.append(r)
# average d2d spectral eff
d2d_spectral_eff_bag.append(d_speff)
# mue spectral eff
mue_spectral_eff_bag.append(m_speff)
# save stuff
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
torch.save(framework.policy_net.state_dict(), data_path)
    # Return the trained framework and the in-training evaluation statistics
return framework, rewards_bag, d2d_spectral_eff_bag, mue_spectral_eff_bag, epsilon # noqa
def test(n_agents, test_env, framework):
framework.policy_net.eval()
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# jain_index = [list() for _ in range(max_d2d+1)]
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.build_scenario(agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
    return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards_bag
def in_training_test(framework: ExternalDQNFramework, device: torch.device):
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
for _ in range(EVAL_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
env.build_scenario(agents)
obs, _ = env.step(agents)
for _ in range(EVAL_STEPS_PER_EPISODE):
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
next_obs, _ = env.step(agents)
obs = next_obs
# mue spectral eff
mue_spectral_eff_bag.append(env.mue_spectral_eff)
# average d2d spectral eff
d2d_spectral_eff_bag.append(env.d2d_spectral_eff)
rewards_bag.append(env.reward)
mean_mue_speff = np.mean(mue_spectral_eff_bag)
mean_d2d_speff = np.mean(d2d_spectral_eff_bag)
mean_reward = np.mean(rewards_bag)
return mean_reward, mean_d2d_speff, mean_mue_speff
def run(framework=None):
mue_sucess_rate_total = []
mue_spectral_effs_total = []
d2d_spectral_effs_total = []
rewards_total = []
start = time()
r, d_speffs, m_speffs, epsilon = 0, 0, 0, 1
if framework is None:
framework, r, d_speffs, m_speffs, epsilon = train(start)
for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):
mue_suc_rates = []
mue_speff_rates = []
d2d_speff_rates = []
rews = []
for it in range(ITERATIONS_PER_NUM_AGENTS):
now = (time() - start) / 60
print(
'Testing. ' +
f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +
f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. ' +
f'Elapsed time: {now} minutes.'
)
test_env = deepcopy(ref_env)
mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \
test(n, test_env, framework)
mue_suc_rates.append(mue_success_rate)
mue_speff_rates.append(mue_spectral_effs)
d2d_speff_rates.append(d2d_spectral_effs)
rews.append(rewards)
mue_sucess_rate_total.append(mue_suc_rates)
mue_spectral_effs_total.append(mue_speff_rates)
d2d_spectral_effs_total.append(d2d_speff_rates)
rewards_total.append(rews)
# save stuff
now = (time() - start) / 60
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/dql/{filename}'
data_path = make_dir_timestamp(dir_path)
data_file_path = f'{data_path}/log.pickle'
data = {
'mue_success_rate': mue_sucess_rate_total,
'd2d_speffs': d2d_spectral_effs_total,
'mue_speffs': mue_spectral_effs_total,
'rewards': rewards_total,
'mue_sinr_threshold': sinr_threshold_train,
'elapsed_time': now,
'training_rewards': r,
'training_d2d_speffs': d_speffs,
'training_mue_speffs': m_speffs,
'eval_every': EVAL_EVERY,
'final_epsilon': epsilon,
}
save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
def run_test():
    filename = gen.path_leaf(__file__)
    filename = filename.split('.')[0]
    data_path = f'models/dql/{filename}.pt'
    # the checkpoint holds the policy network state_dict (see train()), so
    # rebuild the framework and load the weights before running the tests
    framework = ExternalDQNFramework(
        agent_params, env_state_size, len(actions),
        HIDDEN_SIZE, NUM_HIDDEN_LAYERS, LEARNING_RATE
    )
    framework.policy_net.load_state_dict(torch.load(data_path))
    run(framework)
def test_exec():
# environment
test_env = deepcopy(ref_env)
# load framework
framework = ExternalDQNFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
NUM_HIDDEN_LAYERS,
LEARNING_RATE
)
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
state_dict = torch.load(data_path)
framework.policy_net.load_state_dict(state_dict)
framework.policy_net.eval()
# simulation stuff
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# devices positions
pairs_positions = [
((-400, 0, device_height), (-450, 0, device_height)),
((100, 0, device_height), (150, 0, device_height)),
((225, 225, device_height), (275, 225, device_height)),
((55, -55, device_height), (55, -5, device_height)),
]
mue_position = (0, 200, device_height)
# jain_index = [list() for _ in range(max_d2d+1)]
n_agents = len(pairs_positions)
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.set_scenario(pairs_positions, mue_position, agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
d2d_txs, d2d_rxs = zip(*test_env.d2d_pairs)
# D2D interference on the MUE, in dB
d2d_interferences = np.array([
d.caused_mue_interference for d in d2d_txs
])
d2d_interferences_mag = db_to_power(d2d_interferences)
d2d_total_interference = np.sum(d2d_interferences_mag)
percentage_interferences = d2d_interferences_mag / d2d_total_interference
interferences, tx_labels, rx_labels = calculate_interferences(test_env)
if d2d_total_interference != 0:
plot_positions_actions_pie(
test_env.bs, test_env.mue, d2d_txs, d2d_rxs,
actions_index, percentage_interferences,
test_env.mue.sinr > sinr_threshold_train, sinr_threshold_train,
test_env.reward, interferences, tx_labels, rx_labels
)
# jain_index[n_agents].append(gen.jain_index(test_env.sinr_d2ds))
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'data/dql/{filename}_exec.pickle'
data = {
'd2d_speffs_avg_total': d2d_spectral_effs,
'mue_success_rate': mue_success_rate,
'chosen_actions': bag,
'd2d_speffs': d2d_spectral_effs,
'mue_speffs': mue_spectral_effs,
'rewards': rewards_bag,
'mue_sinr_threshold': sinr_threshold_train,
}
with open(data_path, 'wb') as file:
pickle.dump(data, file)
# plot
print_stuff(actions, test_env)
plt.show()
def calculate_interferences(env: CompleteEnvironment10dB):
bs = env.bs
mue = env.mue
d2d_pairs = env.d2d_pairs
txs = [mue]
txs += [p[0] for p in d2d_pairs]
rxs = [bs]
rxs += [p[1] for p in d2d_pairs]
interferences = np.zeros((len(txs), len(rxs)))
for i, tx in enumerate(txs):
for j, (rx, interfered) in enumerate(zip(rxs, txs)):
if tx == interfered:
interf = tx.power_at_receiver
elif tx == mue:
interf = interfered.received_mue_interference
elif rx == bs:
interf = tx.caused_mue_interference
else:
interf = [
power_to_db(i[1]) for i in interfered.interferences
if i[0] == tx.id
][0]
interferences[i][j] = interf
tx_labels = [d.id for d in txs]
rx_labels = [d.id for d in rxs]
return interferences, tx_labels, rx_labels
def print_stuff(actions, env: CompleteEnvironment10dB):
actions = [f'{i:.2f}' for i in actions]
sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]
print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')
print(f'D2D Power levels [dBW]: {actions}')
print(f'D2D SINR [dB]: {sinr_d2ds}')
print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')
if __name__ == '__main__':
run()
| 36.153535
| 94
| 0.643328
|
3a7b95d9a7da73e127da386180e8ef34b9968632
| 22
|
py
|
Python
|
btd6_memory_info/generated/Assets/Scripts/Data/Cosmetics/BloonDecals/BloonDecal/bloon_decal.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Data/Cosmetics/BloonDecals/BloonDecal/bloon_decal.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Data/Cosmetics/BloonDecals/BloonDecal/bloon_decal.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class BloonDecal: pass
| 22
| 22
| 0.863636
|
e0829149dfa200e179b7ece34dc3be81e70d02fe
| 4,300
|
py
|
Python
|
cooking_assistant/settings.py
|
rclsilver/cooking-assistant
|
440f762977d5517f152b8d4b520c590e2cf0eca6
|
[
"Apache-2.0"
] | 1
|
2020-12-01T16:12:28.000Z
|
2020-12-01T16:12:28.000Z
|
cooking_assistant/settings.py
|
rclsilver/cooking-assistant
|
440f762977d5517f152b8d4b520c590e2cf0eca6
|
[
"Apache-2.0"
] | null | null | null |
cooking_assistant/settings.py
|
rclsilver/cooking-assistant
|
440f762977d5517f152b8d4b520c590e2cf0eca6
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for cooking_assistant project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'mnaty#uu5ozakfsdvcc29+07_c(pk1e(dxkjf+ni-ud1)ejjm&')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DJANGO_DEBUG', 'true').lower() in ['true', '1']
ALLOWED_HOSTS = []
if os.getenv('DJANGO_ALLOWED_HOSTS', None) is not None:
for allowed_host in os.getenv('DJANGO_ALLOWED_HOSTS').split(','):
ALLOWED_HOSTS.append(allowed_host)
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'health_check',
'health_check.db',
'rest_framework',
]
MIDDLEWARE = [
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cooking_assistant.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cooking_assistant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {}
if os.getenv('POSTGRES_DB'):
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('POSTGRES_DB'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT'),
}
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
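# Example environment for the PostgreSQL branch above (the values are
# illustrative only; the five POSTGRES_* variables read in DATABASES must be
# set to the real connection details of your server):
#
#   POSTGRES_DB=cooking POSTGRES_USER=cooking POSTGRES_PASSWORD=secret \
#   POSTGRES_HOST=localhost POSTGRES_PORT=5432 python manage.py runserver
#
# When POSTGRES_DB is unset, the SQLite fallback in BASE_DIR is used instead.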
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = os.getenv('DJANGO_LANGUAGE_CODE', 'en-us')
TIME_ZONE = os.getenv('DJANGO_TIME_ZONE', 'UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Celery
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', 'amqp://guest:guest@localhost:5672//')
| 29.054054
| 97
| 0.701163
|
8fef4b49ce6533109de100a0f55eeeb54e6c5885
| 3,822
|
py
|
Python
|
dicoms/forms.py
|
DCAN-Labs/bids-tryworks
|
aa52ec3d7e9a95d07fcf559ef98351529ae972c6
|
[
"BSD-3-Clause"
] | null | null | null |
dicoms/forms.py
|
DCAN-Labs/bids-tryworks
|
aa52ec3d7e9a95d07fcf559ef98351529ae972c6
|
[
"BSD-3-Clause"
] | 27
|
2020-06-26T23:04:08.000Z
|
2022-02-10T10:46:55.000Z
|
dicoms/forms.py
|
DCAN-Labs/bids-tryworks
|
aa52ec3d7e9a95d07fcf559ef98351529ae972c6
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from os.path import basename, normpath
from bootstrap_datepicker_plus import DatePickerInput
from django import forms
from django.utils.translation import ugettext_lazy as _
from drf_braces.serializers.form_serializer import FormSerializer
from dicoms.models import Search, Series
class SearchForm(forms.ModelForm):
class Meta:
model = Search
fields = "__all__"
labels = {
'subject_search': _('Patient ID'),
'study_search': _('Study Description'),
'date_range_alpha': _('Start date'),
'date_range_omega': _('End date')
}
help_texts = {
'date_range_alpha': _('Enter study start date in format YYYY-MM-DD (Not required)'),
'date_range_omega': _('Enter study end date in format YYYY-MM-DD (Not required)'),
'multi_search': _('Search for multiple subjects by uploading a .txt file with one patient ID per line')
}
widgets = {
'date_range_alpha': DatePickerInput(format='%Y-%m-%d'),
'date_range_omega': DatePickerInput(format='%Y-%m-%d')
}
class SerializedSearchForm(FormSerializer):
class Meta(object):
form = SearchForm
def make_conversion_form(session_id):
"""
This is a form class generator, but I'm not sure if it's the best way to make dynamic
forms.
I'm going to attempt to create a dynamic form more directly below in
ConversionForm2
:param session_id:
:return:
"""
if Series.objects.filter(Session=session_id).exists():
series_from_session = Series.objects.filter(Session=session_id)
# loading choices for scan types from bidspec
with open("dicoms/static/jss/bids_spec.json") as infile:
bidspec = json.load(infile)
scan_choices = bidspec['anat'] + bidspec['func'] + bidspec['fmap']
scan_choices.sort()
# creating a tuple list to pass to our form's select widget
# django requires a tuple so we're making one
tuple_scan_choices = [(scan, scan) for scan in scan_choices]
fields = {}
list_of_series = []
# cleaning up path to get last dir/series name
for each in series_from_session:
list_of_series.append(each.Path)
cleaned_series = [basename(normpath(single_series)) for single_series in list_of_series]
cleaned_series_set = set(cleaned_series)
cleaned_series = list(cleaned_series_set)
for series in cleaned_series:
fields[series] = forms.Select(choices=tuple_scan_choices)
return type("ConversionForm", (forms.BaseForm,), {'base_fields': fields})
else:
return None
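# A minimal usage sketch for the generator above (hypothetical view code; the
# names are illustrative): the returned value is a Form *class*, so it has to
# be instantiated before rendering or validation, and it is None when the
# session has no associated Series rows, e.g.
#
#   ConversionForm = make_conversion_form(session_id)
#   if ConversionForm is not None:
#       form = ConversionForm(request.POST or None)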
class ConversionForm2(forms.Form):
name = forms.CharField(max_length=255)
    def __init__(self, session):
        # the session is only used to build the choices below; it must not be
        # passed to forms.Form.__init__, which expects bound form data
        super(ConversionForm2, self).__init__()
series_from_session = Series.objects.filter(Session=session)
        with open("dicoms/bids_spec.json") as infile:
            bidspec = json.load(infile)
scan_choices = bidspec['anat'] + bidspec['func'] + bidspec['fmap']
scan_choices.sort()
# creating a tuple list to pass to our form's select widget
# django requires a tuple so we're making one
tuple_scan_choices = [(scan, scan) for scan in scan_choices]
fields = {}
list_of_series = []
# cleaning up path to get last dir/series name
for each in series_from_session:
list_of_series.append(each.Path)
cleaned_series = [basename(normpath(single_series)) for single_series in list_of_series]
cleaned_series_set = set(cleaned_series)
cleaned_series = list(cleaned_series_set)
# for series in cleaned_series:
# fields[series] = forms.Select(tuple_scan_choices)
# self.fields = fields
| 35.06422
| 115
| 0.658817
|
15ecc5c5b6e198f05b8d3db2b443339913a9950e
| 1,048
|
py
|
Python
|
actions.py
|
piniaplai/weather-bot
|
fac4b658e306973a7e6e5e9a6eeddab885bbd9e2
|
[
"MIT"
] | null | null | null |
actions.py
|
piniaplai/weather-bot
|
fac4b658e306973a7e6e5e9a6eeddab885bbd9e2
|
[
"MIT"
] | null | null | null |
actions.py
|
piniaplai/weather-bot
|
fac4b658e306973a7e6e5e9a6eeddab885bbd9e2
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.actions.action import Action
# https://pypi.org/project/weather-api/
from weather import Weather, Unit
class ActionGetWeather(Action):
def name(self):
return 'action_get_weather'
def run(self, dispatcher, tracker, domain):
weather = Weather(unit=Unit.CELSIUS)
        # use the extracted GPE entity when present, otherwise fall back to Auckland
        gpe = tracker.get_slot('GPE') or 'Auckland'
result = weather.lookup_by_location(gpe)
if result:
condition = result.condition
city = result.location.city
country = result.location.country
dispatcher.utter_message('It\'s ' + condition.text + ' and ' + condition.temp + 'C in ' +
city + ', ' + country + '.')
else:
dispatcher.utter_message('We did not find any weather information for ' + gpe + '. Search by a city name.')
return
| 36.137931
| 119
| 0.645992
|
32c1647359d63a2c5aeed8f45a60906d4713ee17
| 997
|
py
|
Python
|
src/metropolisnonlin.py
|
zhang-changwei/Phase-Transition-Through-Quenching
|
ff47d3a0209c6121eb570b74003934358bc17482
|
[
"MIT"
] | null | null | null |
src/metropolisnonlin.py
|
zhang-changwei/Phase-Transition-Through-Quenching
|
ff47d3a0209c6121eb570b74003934358bc17482
|
[
"MIT"
] | null | null | null |
src/metropolisnonlin.py
|
zhang-changwei/Phase-Transition-Through-Quenching
|
ff47d3a0209c6121eb570b74003934358bc17482
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
import MC
rng = np.random.default_rng()
L = 12
TC, BETA, NU = 2/(np.log(1+np.sqrt(2))), 1/8, 1
TI = 1.5*TC
R = 2
MCBIN = L**2
REPEAT = 100
# tauq = np.logspace(0, 4, num=41, base=10, endpoint=True)
tauq = np.logspace(0, 3, num=31, base=10, endpoint=True)
v = 0.5/tauq**R
m2ave = np.array([], dtype=np.float64)
m2std = np.array([], dtype=np.float64)
for j in range(tauq.size):
m20 = np.array([], dtype=np.float64)
for i in range(REPEAT):
m2 = MC.quench_metropolis2(L, TC, MCBIN, tauq[j], R)
m20 = np.append(m20, m2)
print (j, i, m2)
m2ave = np.append(m2ave, np.average(m20))
m2std = np.append(m2std, np.std(m20))
np.save("data\\MnonlinL12_m2ave.npy", m2ave)
np.save("data\\MnonlinL12_m2std.npy", m2std)
np.save("data\\MnonlinL12_tauq.npy", tauq)
np.save("data\\MnonlinL12_v.npy", v)
plt.figure()
plt.plot(1/v, m2ave*L**2, label="L=12")
plt.legend()
plt.xscale("log")
plt.yscale("log")
plt.show()
| 25.564103
| 60
| 0.643932
|
e34895e80d1cc83bfd1244e8e8571cd8adb61f10
| 1,012
|
py
|
Python
|
src/Visualization/Genre_Accuracy.py
|
Sunhick/music-cognita
|
e90d9ed8122f39bce3e239518df696b35cd6fda8
|
[
"MIT"
] | 2
|
2017-10-05T15:45:23.000Z
|
2021-02-18T13:45:27.000Z
|
src/Visualization/Genre_Accuracy.py
|
Sunhick/music-cognita
|
e90d9ed8122f39bce3e239518df696b35cd6fda8
|
[
"MIT"
] | null | null | null |
src/Visualization/Genre_Accuracy.py
|
Sunhick/music-cognita
|
e90d9ed8122f39bce3e239518df696b35cd6fda8
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
N = 6
Accuracy = (53.76,47.58,24.52,58.37,41.73,49.64)
Error = (100 - 53.76,100 - 47.58,100 - 24.52,100 - 58.37,100 - 41.73,100 - 49.64)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, Accuracy, width, color='b')
rects2 = ax.bar(ind + width, Error, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Accuracy')
ax.set_title('Classifier Vs Accuracy')
ax.set_xticks(ind+width)
ax.set_xticklabels(('LogReg', 'DecisionTree', 'NaiveBayes', 'RandForest', 'SVM', 'SVM+Boosting'))
ax.legend((rects1[0], rects2[0]), ('Accuracy', 'Error'))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.show()
| 30.666667
| 97
| 0.645257
|
2071fb20deb90c86df61a70d3128b08748cdb74a
| 5,070
|
py
|
Python
|
tests/test_apk_patch_size_estimator.py
|
isabella232/apk-patch-size-estimator
|
5d923c259b2fd6023cd5d2a88be4bc25fa23cdb5
|
[
"Apache-2.0"
] | 346
|
2016-04-30T21:26:28.000Z
|
2022-03-30T21:31:59.000Z
|
tests/test_apk_patch_size_estimator.py
|
googlesamples/apk-patch-size-estimator
|
5d923c259b2fd6023cd5d2a88be4bc25fa23cdb5
|
[
"Apache-2.0"
] | 4
|
2017-01-20T14:04:24.000Z
|
2020-05-21T19:56:13.000Z
|
tests/test_apk_patch_size_estimator.py
|
isabella232/apk-patch-size-estimator
|
5d923c259b2fd6023cd5d2a88be4bc25fa23cdb5
|
[
"Apache-2.0"
] | 48
|
2016-05-01T15:08:55.000Z
|
2020-11-14T20:39:06.000Z
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apk_patch_size_estimator
from mock import patch
import os
import subprocess
import unittest
import hashlib
BUF_SIZE = 1 << 16
RANDOM_FILE = 'tests/random_file'
RANDOM_FILE_SIZE = 1 << 16
ZIP1 = 'tests/1.zip'
ZIP2 = 'tests/2.zip'
TMP = '/tmp'
def sha1(filename):
accumulator = hashlib.sha1()
file = open(filename, "rb")
data = file.read(BUF_SIZE)
while data:
accumulator.update(data)
data = file.read(BUF_SIZE)
return accumulator.hexdigest()
class TestCalculates(unittest.TestCase):
def setUp(self):
apk_patch_size_estimator.find_bins_or_die()
def test_find_binary_success(self):
with patch.object(subprocess, 'check_output', return_value=''):
apk_patch_size_estimator.find_binary('ls')
subprocess.check_output.assert_any_call(['which', 'ls'])
def test_find_binary_fail(self):
with self.assertRaises(Exception) as context:
apk_patch_size_estimator.find_binary('does_not_extist_command')
self.assertEqual(
context.exception.message,
'No "does_not_extist_command" on PATH, please install or fix PATH.')
def test_bsdiff(self):
bsdiff_patch_path = apk_patch_size_estimator.bsdiff(
ZIP1,
ZIP2,
TMP)
    # Obtained by computing the bsdiff of 1.zip and 2.zip, then:
# Strip first 32 bytes
# bunzip2 the rest
# attach the 32 bytes back
# Compute sha1sum
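    # A sketch of that recipe as a shell pipeline (assuming the bsdiff patch
    # was written to a file named "patch"; the file names are illustrative):
    #   head -c 32 patch > header
    #   tail -c +33 patch | bunzip2 > body
    #   cat header body | sha1sum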
expected_sha1 = "bd7434d2fbdcca1d6e346cd9441ce1c7fbdc3200"
self.assertTrue(os.path.exists(bsdiff_patch_path))
self.assertEqual(sha1(bsdiff_patch_path), expected_sha1)
os.remove(bsdiff_patch_path)
def test_filebyfile(self):
filebyfile_patch_path = apk_patch_size_estimator.filebyfile(
ZIP1,
ZIP2,
TMP)
# Obtained by running
# java -jar lib/file-by-file-tools.jar --generate --old tests/1.zip \
# --new tests/2.zip --patch patch && sha1sum patch && rm patch
expected_sha1 = "6fd285a07a4d5256a8b46a233dbf7acb360e59c8"
self.assertTrue(os.path.exists(filebyfile_patch_path))
self.assertEqual(sha1(filebyfile_patch_path), expected_sha1)
os.remove(filebyfile_patch_path)
def test_gzip(self):
gzipped_path = apk_patch_size_estimator.gzip(RANDOM_FILE)
# Obtained by running
# gzip -9 < tests/random_file | sha1sum
expected_sha1 = "720ade7137c1ae830272a8a3d04e90f337edce5f"
self.assertTrue(os.path.exists(gzipped_path))
self.assertEqual(sha1(gzipped_path), expected_sha1)
os.remove(gzipped_path)
def test_brotli(self):
brotlied_path = apk_patch_size_estimator.brotli(RANDOM_FILE)
# Obtained by running
# brotli -c tests/random_file | sha1sum
expected_sha1 = "bf1f64442ca5f0c6d58874dcdccc0b4045521823"
self.assertTrue(os.path.exists(brotlied_path))
self.assertEqual(sha1(brotlied_path), expected_sha1)
os.remove(brotlied_path)
def test_get_size(self):
self.assertEqual(apk_patch_size_estimator.get_size(RANDOM_FILE),
RANDOM_FILE_SIZE)
def test_no_diff(self):
no_diff_patch_path = apk_patch_size_estimator.no_diff(ZIP1, ZIP2, TMP)
self.assertTrue(os.path.exists(no_diff_patch_path))
self.assertEqual(sha1(no_diff_patch_path), sha1(ZIP2))
self.assertNotEqual(no_diff_patch_path, ZIP2)
os.remove(no_diff_patch_path)
def test_no_compress(self):
no_compress_path = apk_patch_size_estimator.no_compress(RANDOM_FILE)
self.assertTrue(os.path.exists(no_compress_path))
self.assertEqual(sha1(no_compress_path), sha1(RANDOM_FILE))
self.assertNotEqual(no_compress_path, RANDOM_FILE)
os.remove(no_compress_path)
def test_human_file_size(self):
self.assertEqual(
apk_patch_size_estimator.human_file_size(0), '0B')
self.assertEqual(
apk_patch_size_estimator.human_file_size(100), '100B')
self.assertEqual(
apk_patch_size_estimator.human_file_size(1024), '1KB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(1048576), '1MB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(1073741824), '1GB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(1099511627776), '1TB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(1981633), '1.89MB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(15654267), '14.9MB')
self.assertEqual(
apk_patch_size_estimator.human_file_size(353244297), '337MB')
if __name__ == '__main__':
unittest.main()
| 33.8
| 76
| 0.739448
|
fcd53feedbc3d78885b6513029eb0fcb0448a850
| 10,241
|
py
|
Python
|
venv/Lib/site-packages/um/visuals/color.py
|
DarkEyeBr/Python
|
f45239551d19f49eac35185e4f72b067d5820f3a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/um/visuals/color.py
|
DarkEyeBr/Python
|
f45239551d19f49eac35185e4f72b067d5820f3a
|
[
"MIT"
] | 1
|
2019-04-12T18:55:02.000Z
|
2019-04-12T18:55:02.000Z
|
venv/Lib/site-packages/um/visuals/color.py
|
DarkEyeBr/Python
|
f45239551d19f49eac35185e4f72b067d5820f3a
|
[
"MIT"
] | null | null | null |
"""
Module containing color utilities and color constants.
Source of colors - Google Material Design
https://material.io/guidelines/style/color.html#color-color-palette
"""
import re
class Color:
    def __init__(self, r: int = 0x0, g: int = 0x0, b: int = 0x0):
if any(value < 0 or value > 255 for value in (r, g, b)):
raise ValueError('RGB color values have to be in range 0-255')
self.r = r
self.g = g
self.b = b
def hex(self, prefix: str=''):
return hex((self.r, self.g, self.b), prefix)
def hex2rgb(hex_code: str) -> tuple:
"""
Convert color given in hex code to RGB in ints. Result is returned inside 3-element tuple.
:param hex_code:
:return: tuple (R, G, B)
"""
pattern = re.compile(r'^#?[a-fA-F0-9]{6}$')
if not re.match(pattern, hex_code):
raise ValueError('Hex code should have 6 characters')
h = hex_code.lstrip('#')
return tuple(int(h[i: i + 2], 16) for i in (0, 2, 4))
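# Quick illustration of the conversion above (values checked by hand):
#   hex2rgb('#F44336') == (244, 67, 54)
#   hex2rgb('b71c1c')  == (183, 28, 28)   # the leading '#' is optional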
def hex(color: tuple, prefix: str='#') -> str:
"""
Convert RGB to HEX.
:param color: 3-element tuple with color RGB values
:param prefix: string prefix
:return: string with color in hex
"""
    if len(color) != 3:
raise ValueError('Color should be a 3 element tuple')
if not all([0 <= v <= 255 for v in color]):
raise ValueError('RGB values have to be in range from 0 to 255')
return '{}{:02x}{:02x}{:02x}'.format(prefix, color[0], color[1], color[2])
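# And the reverse direction, using the default '#' prefix:
#   hex((244, 67, 54)) == '#f44336'
#   hex((183, 28, 28), prefix='') == 'b71c1c'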
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
# Red
RED = hex2rgb('#F44336')
RED50 = hex2rgb('#FFEBEE')
RED100 = hex2rgb('#FFCDD2')
RED200 = hex2rgb('#EF9A9A')
RED300 = hex2rgb('#E57373')
RED400 = hex2rgb('#EF5350')
RED500 = hex2rgb('#F44336')
RED600 = hex2rgb('#E53935')
RED700 = hex2rgb('#D32F2F')
RED800 = hex2rgb('#C62828')
RED900 = hex2rgb('#B71C1C')
REDA100 = hex2rgb('#FF8A80')
REDA200 = hex2rgb('#FF5252')
REDA400 = hex2rgb('#FF1744')
REDA700 = hex2rgb('#D50000')
# Pink
PINK = hex2rgb('#E91E63')
PINK50 = hex2rgb('#FCE4EC')
PINK100 = hex2rgb('#F8BBD0')
PINK200 = hex2rgb('#F48FB1')
PINK300 = hex2rgb('#F06292')
PINK400 = hex2rgb('#EC407A')
PINK500 = hex2rgb('#E91E63')
PINK600 = hex2rgb('#D81B60')
PINK700 = hex2rgb('#C2185B')
PINK800 = hex2rgb('#AD1457')
PINK900 = hex2rgb('#880E4F')
PINKA100 = hex2rgb('#FF80AB')
PINKA200 = hex2rgb('#FF4081')
PINKA400 = hex2rgb('#F50057')
PINKA700 = hex2rgb('#C51162')
# Purple
PURPLE = hex2rgb('#9C27B0')
PURPLE50 = hex2rgb('#F3E5F5')
PURPLE100 = hex2rgb('#E1BEE7')
PURPLE200 = hex2rgb('#CE93D8')
PURPLE300 = hex2rgb('#BA68C8')
PURPLE400 = hex2rgb('#AB47BC')
PURPLE500 = hex2rgb('#9C27B0')
PURPLE600 = hex2rgb('#8E24AA')
PURPLE700 = hex2rgb('#7B1FA2')
PURPLE800 = hex2rgb('#6A1B9A')
PURPLE900 = hex2rgb('#4A148C')
PURPLEA100 = hex2rgb('#EA80FC')
PURPLEA200 = hex2rgb('#E040FB')
PURPLEA400 = hex2rgb('#D500F9')
PURPLEA700 = hex2rgb('#AA00FF')
# Deep Purple
DEEP_PURPLE = hex2rgb('#673AB7')
DEEP_PURPLE50 = hex2rgb('#EDE7F6')
DEEP_PURPLE100 = hex2rgb('#D1C4E9')
DEEP_PURPLE200 = hex2rgb('#B39DDB')
DEEP_PURPLE300 = hex2rgb('#9575CD')
DEEP_PURPLE400 = hex2rgb('#7E57C2')
DEEP_PURPLE500 = hex2rgb('#673AB7')
DEEP_PURPLE600 = hex2rgb('#5E35B1')
DEEP_PURPLE700 = hex2rgb('#512DA8')
DEEP_PURPLE800 = hex2rgb('#4527A0')
DEEP_PURPLE900 = hex2rgb('#311B92')
DEEP_PURPLEA100 = hex2rgb('#B388FF')
DEEP_PURPLEA200 = hex2rgb('#7C4DFF')
DEEP_PURPLEA400 = hex2rgb('#651FFF')
DEEP_PURPLEA700 = hex2rgb('#6200EA')
# Indigo
INDIGO = hex2rgb('#3F51B5')
INDIGO50 = hex2rgb('#E8EAF6')
INDIGO100 = hex2rgb('#C5CAE9')
INDIGO200 = hex2rgb('#9FA8DA')
INDIGO300 = hex2rgb('#7986CB')
INDIGO400 = hex2rgb('#5C6BC0')
INDIGO500 = hex2rgb('#3F51B5')
INDIGO600 = hex2rgb('#3949AB')
INDIGO700 = hex2rgb('#303F9F')
INDIGO800 = hex2rgb('#283593')
INDIGO900 = hex2rgb('#1A237E')
INDIGOA100 = hex2rgb('#8C9EFF')
INDIGOA200 = hex2rgb('#536DFE')
INDIGOA400 = hex2rgb('#3D5AFE')
INDIGOA700 = hex2rgb('#304FFE')
# Blue
BLUE = hex2rgb('#2196F3')
BLUE50 = hex2rgb('#E3F2FD')
BLUE100 = hex2rgb('#BBDEFB')
BLUE200 = hex2rgb('#90CAF9')
BLUE300 = hex2rgb('#64B5F6')
BLUE400 = hex2rgb('#42A5F5')
BLUE500 = hex2rgb('#2196F3')
BLUE600 = hex2rgb('#1E88E5')
BLUE700 = hex2rgb('#1976D2')
BLUE800 = hex2rgb('#1565C0')
BLUE900 = hex2rgb('#0D47A1')
BLUEA100 = hex2rgb('#82B1FF')
BLUEA200 = hex2rgb('#448AFF')
BLUEA400 = hex2rgb('#2979FF')
BLUEA700 = hex2rgb('#2962FF')
# Light Blue
LIGHT_BLUE = hex2rgb('#03A9F4')
LIGHT_BLUE50 = hex2rgb('#E1F5FE')
LIGHT_BLUE100 = hex2rgb('#B3E5FC')
LIGHT_BLUE200 = hex2rgb('#81D4FA')
LIGHT_BLUE300 = hex2rgb('#4FC3F7')
LIGHT_BLUE400 = hex2rgb('#29B6F6')
LIGHT_BLUE500 = hex2rgb('#03A9F4')
LIGHT_BLUE600 = hex2rgb('#039BE5')
LIGHT_BLUE700 = hex2rgb('#0288D1')
LIGHT_BLUE800 = hex2rgb('#0277BD')
LIGHT_BLUE900 = hex2rgb('#01579B')
LIGHT_BLUEA100 = hex2rgb('#80D8FF')
LIGHT_BLUEA200 = hex2rgb('#40C4FF')
LIGHT_BLUEA400 = hex2rgb('#00B0FF')
LIGHT_BLUEA700 = hex2rgb('#0091EA')
# Cyan
CYAN = hex2rgb('#00BCD4')
CYAN50 = hex2rgb('#E0F7FA')
CYAN100 = hex2rgb('#B2EBF2')
CYAN200 = hex2rgb('#80DEEA')
CYAN300 = hex2rgb('#4DD0E1')
CYAN400 = hex2rgb('#26C6DA')
CYAN500 = hex2rgb('#00BCD4')
CYAN600 = hex2rgb('#00ACC1')
CYAN700 = hex2rgb('#0097A7')
CYAN800 = hex2rgb('#00838F')
CYAN900 = hex2rgb('#006064')
CYANA100 = hex2rgb('#84FFFF')
CYANA200 = hex2rgb('#18FFFF')
CYANA400 = hex2rgb('#00E5FF')
CYANA700 = hex2rgb('#00B8D4')
# Teal
TEAL = hex2rgb('#009688')
TEAL50 = hex2rgb('#E0F2F1')
TEAL100 = hex2rgb('#B2DFDB')
TEAL200 = hex2rgb('#80CBC4')
TEAL300 = hex2rgb('#4DB6AC')
TEAL400 = hex2rgb('#26A69A')
TEAL500 = hex2rgb('#009688')
TEAL600 = hex2rgb('#00897B')
TEAL700 = hex2rgb('#00796B')
TEAL800 = hex2rgb('#00695C')
TEAL900 = hex2rgb('#004D40')
TEALA100 = hex2rgb('#A7FFEB')
TEALA200 = hex2rgb('#64FFDA')
TEALA400 = hex2rgb('#1DE9B6')
TEALA700 = hex2rgb('#00BFA5')
# Green
GREEN = hex2rgb('#4CAF50')
GREEN50 = hex2rgb('#E8F5E9')
GREEN100 = hex2rgb('#C8E6C9')
GREEN200 = hex2rgb('#A5D6A7')
GREEN300 = hex2rgb('#81C784')
GREEN400 = hex2rgb('#66BB6A')
GREEN500 = hex2rgb('#4CAF50')
GREEN600 = hex2rgb('#43A047')
GREEN700 = hex2rgb('#388E3C')
GREEN800 = hex2rgb('#2E7D32')
GREEN900 = hex2rgb('#1B5E20')
GREENA100 = hex2rgb('#B9F6CA')
GREENA200 = hex2rgb('#69F0AE')
GREENA400 = hex2rgb('#00E676')
GREENA700 = hex2rgb('#00C853')
# Light Green
LIGHT_GREEN = hex2rgb('#8BC34A')
LIGHT_GREEN50 = hex2rgb('#F1F8E9')
LIGHT_GREEN100 = hex2rgb('#DCEDC8')
LIGHT_GREEN200 = hex2rgb('#C5E1A5')
LIGHT_GREEN300 = hex2rgb('#AED581')
LIGHT_GREEN400 = hex2rgb('#9CCC65')
LIGHT_GREEN500 = hex2rgb('#8BC34A')
LIGHT_GREEN600 = hex2rgb('#7CB342')
LIGHT_GREEN700 = hex2rgb('#689F38')
LIGHT_GREEN800 = hex2rgb('#558B2F')
LIGHT_GREEN900 = hex2rgb('#33691E')
LIGHT_GREENA100 = hex2rgb('#CCFF90')
LIGHT_GREENA200 = hex2rgb('#B2FF59')
LIGHT_GREENA400 = hex2rgb('#76FF03')
LIGHT_GREENA700 = hex2rgb('#64DD17')
# Lime
LIME = hex2rgb('#CDDC39')
LIME50 = hex2rgb('#F9FBE7')
LIME100 = hex2rgb('#F0F4C3')
LIME200 = hex2rgb('#E6EE9C')
LIME300 = hex2rgb('#DCE775')
LIME400 = hex2rgb('#D4E157')
LIME500 = hex2rgb('#CDDC39')
LIME600 = hex2rgb('#C0CA33')
LIME700 = hex2rgb('#AFB42B')
LIME800 = hex2rgb('#9E9D24')
LIME900 = hex2rgb('#827717')
LIMEA100 = hex2rgb('#F4FF81')
LIMEA200 = hex2rgb('#EEFF41')
LIMEA400 = hex2rgb('#C6FF00')
LIMEA700 = hex2rgb('#AEEA00')
# Yellow
YELLOW = hex2rgb('#FFEB3B')
YELLOW50 = hex2rgb('#FFFDE7')
YELLOW100 = hex2rgb('#FFF9C4')
YELLOW200 = hex2rgb('#FFF59D')
YELLOW300 = hex2rgb('#FFF176')
YELLOW400 = hex2rgb('#FFEE58')
YELLOW500 = hex2rgb('#FFEB3B')
YELLOW600 = hex2rgb('#FDD835')
YELLOW700 = hex2rgb('#FBC02D')
YELLOW800 = hex2rgb('#F9A825')
YELLOW900 = hex2rgb('#F57F17')
YELLOWA100 = hex2rgb('#FFFF8D')
YELLOWA200 = hex2rgb('#FFFF00')
YELLOWA400 = hex2rgb('#FFEA00')
YELLOWA700 = hex2rgb('#FFD600')
# Amber
AMBER = hex2rgb('#FFC107')
AMBER50 = hex2rgb('#FFF8E1')
AMBER100 = hex2rgb('#FFECB3')
AMBER200 = hex2rgb('#FFE082')
AMBER300 = hex2rgb('#FFD54F')
AMBER400 = hex2rgb('#FFCA28')
AMBER500 = hex2rgb('#FFC107')
AMBER600 = hex2rgb('#FFB300')
AMBER700 = hex2rgb('#FFA000')
AMBER800 = hex2rgb('#FF8F00')
AMBER900 = hex2rgb('#FF6F00')
AMBERA100 = hex2rgb('#FFE57F')
AMBERA200 = hex2rgb('#FFD740')
AMBERA400 = hex2rgb('#FFC400')
AMBERA700 = hex2rgb('#FFAB00')
# Orange
ORANGE = hex2rgb('#FF9800')
ORANGE50 = hex2rgb('#FFF3E0')
ORANGE100 = hex2rgb('#FFE0B2')
ORANGE200 = hex2rgb('#FFCC80')
ORANGE300 = hex2rgb('#FFB74D')
ORANGE400 = hex2rgb('#FFA726')
ORANGE500 = hex2rgb('#FF9800')
ORANGE600 = hex2rgb('#FB8C00')
ORANGE700 = hex2rgb('#F57C00')
ORANGE800 = hex2rgb('#EF6C00')
ORANGE900 = hex2rgb('#E65100')
ORANGEA100 = hex2rgb('#FFD180')
ORANGEA200 = hex2rgb('#FFAB40')
ORANGEA400 = hex2rgb('#FF9100')
ORANGEA700 = hex2rgb('#FF6D00')
# Deep Orange
DEEP_ORANGE = hex2rgb('#FF5722')
DEEP_ORANGE50 = hex2rgb('#FBE9E7')
DEEP_ORANGE100 = hex2rgb('#FFCCBC')
DEEP_ORANGE200 = hex2rgb('#FFAB91')
DEEP_ORANGE300 = hex2rgb('#FF8A65')
DEEP_ORANGE400 = hex2rgb('#FF7043')
DEEP_ORANGE500 = hex2rgb('#FF5722')
DEEP_ORANGE600 = hex2rgb('#F4511E')
DEEP_ORANGE700 = hex2rgb('#E64A19')
DEEP_ORANGE800 = hex2rgb('#D84315')
DEEP_ORANGE900 = hex2rgb('#BF360C')
DEEP_ORANGEA100 = hex2rgb('#FF9E80')
DEEP_ORANGEA200 = hex2rgb('#FF6E40')
DEEP_ORANGEA400 = hex2rgb('#FF3D00')
DEEP_ORANGEA700 = hex2rgb('#DD2C00')
# Brown
BROWN = hex2rgb('#795548')
BROWN50 = hex2rgb('#EFEBE9')
BROWN100 = hex2rgb('#D7CCC8')
BROWN200 = hex2rgb('#BCAAA4')
BROWN300 = hex2rgb('#A1887F')
BROWN400 = hex2rgb('#8D6E63')
BROWN500 = hex2rgb('#795548')
BROWN600 = hex2rgb('#6D4C41')
BROWN700 = hex2rgb('#5D4037')
BROWN800 = hex2rgb('#4E342E')
BROWN900 = hex2rgb('#3E2723')
# Grey
GREY = hex2rgb('#9E9E9E')
GREY50 = hex2rgb('#FAFAFA')
GREY100 = hex2rgb('#F5F5F5')
GREY200 = hex2rgb('#EEEEEE')
GREY300 = hex2rgb('#E0E0E0')
GREY400 = hex2rgb('#BDBDBD')
GREY500 = hex2rgb('#9E9E9E')
GREY600 = hex2rgb('#757575')
GREY700 = hex2rgb('#616161')
GREY800 = hex2rgb('#424242')
GREY900 = hex2rgb('#212121')
# Blue Grey
BLUE_GREY = hex2rgb('#607D8B')
BLUE_GREY50 = hex2rgb('#ECEFF1')
BLUE_GREY100 = hex2rgb('#CFD8DC')
BLUE_GREY200 = hex2rgb('#B0BEC5')
BLUE_GREY300 = hex2rgb('#90A4AE')
BLUE_GREY400 = hex2rgb('#78909C')
BLUE_GREY500 = hex2rgb('#607D8B')
BLUE_GREY600 = hex2rgb('#546E7A')
BLUE_GREY700 = hex2rgb('#455A64')
BLUE_GREY800 = hex2rgb('#37474F')
BLUE_GREY900 = hex2rgb('#263238')
| 28.134615
| 94
| 0.693389
|
2a078aa1ae6053ec9d817e16c4499d3f94caf80a
| 13,565
|
py
|
Python
|
backend/uclapi/workspaces/views.py
|
nayanadasgupta/uclapi
|
54fbdf4232fe3b309332291c29233dcb6e21b2c9
|
[
"MIT"
] | null | null | null |
backend/uclapi/workspaces/views.py
|
nayanadasgupta/uclapi
|
54fbdf4232fe3b309332291c29233dcb6e21b2c9
|
[
"MIT"
] | null | null | null |
backend/uclapi/workspaces/views.py
|
nayanadasgupta/uclapi
|
54fbdf4232fe3b309332291c29233dcb6e21b2c9
|
[
"MIT"
] | null | null | null |
import re
from base64 import b64decode
from django.utils.decorators import method_decorator
from rest_framework import generics
from rest_framework.decorators import api_view
from rest_framework.exceptions import ParseError
from common.decorators import uclapi_protected_endpoint
from common.helpers import PrettyJsonResponse as JsonResponse, pretty_response
from common.helpers import RateLimitHttpResponse as HttpResponse
from .image_builder import ImageBuilder
from .models import Sensors, Historical, Surveys
from .occupeye.api import OccupEyeApi
from .occupeye.constants import OccupEyeConstants
from .occupeye.exceptions import BadOccupEyeRequest
from .pagination import HistoricalListCursorPagination
from .serializers import SensorsSerializer, HistoricalSerializer, SurveysSerializer
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_surveys(request, *args, **kwargs):
api = OccupEyeApi()
consts = OccupEyeConstants()
survey_filter = request.GET.get("survey_filter", "student")
if survey_filter not in consts.VALID_SURVEY_FILTERS:
response = JsonResponse(
{
"ok": False,
"error": "The survey filter you provided is invalid. Valid survey filters are: "
+ str(consts.VALID_SURVEY_FILTERS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
response_data = {"ok": True, "surveys": api.get_surveys(survey_filter)}
return JsonResponse(response_data, custom_header_data=kwargs)
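# A minimal request sketch for the view above. The URL path is a placeholder
# (actual routing lives in the app's urls.py, which is not part of this file);
# survey_filter defaults to "student" and must be one of consts.VALID_SURVEY_FILTERS:
#     GET /workspaces/surveys?survey_filter=student
# On success the body is {"ok": true, "surveys": [...]}; otherwise a 400
# response carries an "error" message.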
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_map_image(request, *args, **kwargs):
try:
image_id = request.GET["image_id"]
except KeyError:
response = JsonResponse({"ok": False, "error": "No Image ID provided."}, custom_header_data=kwargs)
response.status_code = 400
return response
api = OccupEyeApi()
try:
(image_b64, content_type) = api.get_image(image_id)
except BadOccupEyeRequest:
response = JsonResponse(
{
"ok": False,
"error": "The image with the ID you requested does not exist.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
image_format = request.GET.get("image_format", "base64")
if image_format == "raw":
return HttpResponse(
content=b64decode(image_b64),
custom_header_data=kwargs,
content_type=content_type,
)
elif image_format == "base64":
response = JsonResponse(
{"ok": True, "content_type": content_type, "data": image_b64},
custom_header_data=kwargs,
)
return response
else:
response = JsonResponse(
{
"ok": False,
"error": "You specified a response format that was not either raw or base64.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_survey_sensors(request, *args, **kwargs):
try:
survey_id = request.GET["survey_id"]
except KeyError:
response = JsonResponse({"ok": False, "error": "Please specify a survey_id."})
response.status_code = 400
return response
api = OccupEyeApi()
try:
data = api.get_survey_sensors(survey_id)
except BadOccupEyeRequest:
response = JsonResponse({"ok": False, "error": "The survey_id you specified was not valid."})
response.status_code = 400
return response
response = JsonResponse({"ok": True, **data}, custom_header_data=kwargs)
return response
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_survey_max_timestamp(request, *args, **kwargs):
try:
survey_id = request.GET["survey_id"]
except KeyError:
response = JsonResponse({"ok": False, "error": "Please specify a survey_id."})
response.status_code = 400
return response
api = OccupEyeApi()
try:
(survey_id_int, max_timestamp) = api.get_max_survey_timestamp(survey_id)
except BadOccupEyeRequest:
response = JsonResponse({"ok": False, "error": "The survey_id you specified was not valid."})
response.status_code = 400
return response
response = JsonResponse(
{"ok": True, "survey_id": survey_id_int, "last_updated": max_timestamp},
custom_header_data=kwargs,
)
return response
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_survey_sensors_summary(request, *args, **kwargs):
survey_ids = request.GET.get("survey_ids", None)
survey_filter = request.GET.get("survey_filter", "student")
consts = OccupEyeConstants()
if survey_filter not in consts.VALID_SURVEY_FILTERS:
response = JsonResponse(
{
"ok": False,
"error": "The survey filter you provided is invalid. Valid survey filters are: "
+ str(consts.VALID_SURVEY_FILTERS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
api = OccupEyeApi()
try:
data = api.get_survey_sensors_summary(survey_ids, survey_filter)
except BadOccupEyeRequest:
response = JsonResponse(
{
"ok": False,
"error": "One or more of the survey_ids you requested is not valid.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
response = JsonResponse({"ok": True, "surveys": data}, custom_header_data=kwargs)
return response
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_averages_time(request, *args, **kwargs):
api = OccupEyeApi()
consts = OccupEyeConstants()
survey_ids = request.GET.get("survey_ids", None)
survey_filter = request.GET.get("survey_filter", "student")
if survey_filter not in consts.VALID_SURVEY_FILTERS:
response = JsonResponse(
{
"ok": False,
"error": "The survey filter you provided is invalid. Valid survey filters are: "
+ str(consts.VALID_SURVEY_FILTERS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
try:
day_count = request.GET["days"]
except KeyError:
response = JsonResponse(
{
"ok": False,
"error": "You did not specify how many days of historical data should be returned. "
"Valid options are: " + str(consts.VALID_HISTORICAL_DATA_DAYS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
if not day_count.isdigit():
response = JsonResponse(
{
"ok": False,
"error": "You did not specify an integer number of days of historical days. Valid options are: "
+ str(consts.VALID_HISTORICAL_DATA_DAYS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
day_count = int(day_count)
if day_count not in consts.VALID_HISTORICAL_DATA_DAYS:
response = JsonResponse(
{
"ok": False,
"error": "You did not specify a valid number of days of historical days. Valid options are: "
+ str(consts.VALID_HISTORICAL_DATA_DAYS),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
try:
data = api.get_time_averages(survey_ids, day_count, survey_filter)
except BadOccupEyeRequest:
response = JsonResponse(
{
"ok": False,
"error": "One or more of the survey_ids you requested is not valid.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
response = JsonResponse({"ok": True, "surveys": data}, custom_header_data=kwargs)
return response
@api_view(["GET"])
@uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces")
def get_live_map(request, *args, **kwargs):
try:
survey_id = request.GET["survey_id"]
map_id = request.GET["map_id"]
except KeyError:
response = JsonResponse(
{
"ok": False,
"error": "You must provide a Survey ID and a Map ID to get a live sensor status image.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
# Thank you Stack Overflow
# https://stackoverflow.com/a/1636354/5297057
colour_pattern = re.compile("^#(?:[0-9a-fA-F]{3}){1,2}$")
absent_colour = request.GET.get("absent_colour", "#ABE00C")
occupied_colour = request.GET.get("occupied_colour", "#FFC90E")
image_scale_str = request.GET.get("image_scale", "0.02")
circle_radius_str = request.GET.get("circle_radius", "128")
if not re.match(colour_pattern, absent_colour) or not re.match(colour_pattern, occupied_colour):
response = JsonResponse(
{
"ok": False,
"error": (
"The custom colours you specfied did not match "
"the format of HTML hex colours. Colours must "
"either be in the format #ABC or #ABCDEF."
),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
try:
image_scale = float(image_scale_str)
except ValueError:
response = JsonResponse(
{
"ok": False,
"error": (
"The scale you specified is not valid. It "
"must be a floating point number, such as 1 "
"or 0.02."
),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
try:
circle_radius = float(circle_radius_str)
except ValueError:
response = JsonResponse(
{
"ok": False,
"error": (
"The circle radiuus you specified is not valid. "
"It must be a floating point number, such as 128 or "
"100.5."
),
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
try:
ib = ImageBuilder(survey_id, map_id)
except BadOccupEyeRequest:
response = JsonResponse(
{
"ok": False,
"error": "Either the IDs you sent were not integers, or they do not exist.",
},
custom_header_data=kwargs,
)
response.status_code = 400
return response
ib.set_colours(absent=absent_colour, occupied=occupied_colour)
ib.set_circle_radius(circle_radius=circle_radius)
ib.set_image_scale(image_scale=image_scale)
map_svg = ib.get_live_map()
response = HttpResponse(map_svg, content_type="image/svg+xml", custom_header_data=kwargs)
response["Content-Length"] = len(map_svg)
return response
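# A minimal request sketch for get_live_map above. The path and IDs are
# placeholders; the colour, scale and radius parameters are optional and fall
# back to the defaults read from request.GET above:
#     GET /workspaces/images/map/live?survey_id=46&map_id=79
#         &absent_colour=%23ABE00C&occupied_colour=%23FFC90E
#         &image_scale=0.02&circle_radius=128
# The response is the SVG document produced by ImageBuilder (image/svg+xml).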
class SurveysList(generics.ListAPIView):
queryset = Surveys.objects.all()
serializer_class = SurveysSerializer
filterset_fields = {"survey_id": ["exact"], "active": ["exact"]}
@method_decorator(uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces-Historical"))
def list(self, request, *args, **kwargs):
return pretty_response(super().list(request, *args, **kwargs), custom_header_data=kwargs)
class SensorsList(generics.ListAPIView):
queryset = Sensors.objects.all()
serializer_class = SensorsSerializer
filterset_fields = {"survey_id": ["exact"], "sensor_id": ["exact"]}
@method_decorator(uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces-Historical"))
def list(self, request, *args, **kwargs):
return pretty_response(super().list(request, *args, **kwargs), custom_header_data=kwargs)
class HistoricalList(generics.ListAPIView):
queryset = Historical.objects.all().order_by("datetime")
serializer_class = HistoricalSerializer
pagination_class = HistoricalListCursorPagination
filterset_fields = {"survey_id": ["exact"], "sensor_id": ["exact"],
"datetime": ["gte", "lte", "exact", "gt", "lt"]}
@method_decorator(uclapi_protected_endpoint(personal_data=False, last_modified_redis_key="Workspaces-Historical"))
def list(self, request, *args, **kwargs):
if self.request.query_params.get("survey_id", None) is None:
raise ParseError("survey_id is a required field")
return pretty_response(super().list(request, *args, **kwargs), custom_header_data=kwargs)
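# A minimal filtering sketch for HistoricalList above, assuming django-filter's
# DjangoFilterBackend is enabled project-wide (that configuration is not part of
# this file). The declared lookups map to query parameters such as:
#     GET /workspaces/historical?survey_id=46&sensor_id=12345
#         &datetime__gte=2021-01-01T00:00:00&datetime__lt=2021-02-01T00:00:00
# Path and ID values are placeholders; omitting survey_id raises
# ParseError("survey_id is a required field") as implemented in list().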
| 34.516539
| 118
| 0.624991
|
6299ac230db4edb3ffd5120db31eb6d17117e1b4
| 1,557
|
py
|
Python
|
alpaca/dataloader/red_wine.py
|
mtereshkin/alpaca
|
1fe46f802dd02a90792d65a03e3e7066fa9adfcf
|
[
"Apache-2.0"
] | null | null | null |
alpaca/dataloader/red_wine.py
|
mtereshkin/alpaca
|
1fe46f802dd02a90792d65a03e3e7066fa9adfcf
|
[
"Apache-2.0"
] | null | null | null |
alpaca/dataloader/red_wine.py
|
mtereshkin/alpaca
|
1fe46f802dd02a90792d65a03e3e7066fa9adfcf
|
[
"Apache-2.0"
] | null | null | null |
from os import path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from .config import DATA_DIR
from .saver import DataSaver
from .downloader import download
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
class RedWineData:
"""Red wine quality dataset from UCI"""
def __init__(self, use_cache=False, val_split=0.2):
self.use_cache = use_cache
cache_dir = path.join(DATA_DIR, 'dataloader/data/red_wine')
self.saver = DataSaver(cache_dir)
self.val_split = val_split
self._build_dataset(cache_dir)
def dataset(self, label):
if self.use_cache:
return self.saver.load(label)
data = self.data[label]
x, y = data[:, :-1], data[:, -1:]
self.saver.save(x, y, label)
return x, y
def _build_dataset(self, cache_dir):
data_path = download(cache_dir, 'winequality-red.csv', URL)
self.df = pd.read_csv(data_path, sep=';')
table = self.df.to_numpy()
if self.val_split != 0:
train, val = train_test_split(table, test_size=self.val_split, shuffle=True)
else:
train, val = table, []
self.data = {'train': train, 'val': val}
if __name__ == '__main__':
dataset = RedWineData()
x_train, y_train = dataset.dataset('train')
x_val, y_val = dataset.dataset('val')
print(dataset.df.head())
print(x_train.shape, y_train.shape, y_val.shape)
print(x_train[:5], y_train[:5])
| 30.529412
| 98
| 0.648041
|
97b20117b1a92e3efdbf7d3edf603952ff864495
| 1,018
|
py
|
Python
|
rhythms.py
|
gexahedron/sc3_microtonal
|
94c3091705d1e194dab0ec600b35c968ceb79c95
|
[
"MIT"
] | null | null | null |
rhythms.py
|
gexahedron/sc3_microtonal
|
94c3091705d1e194dab0ec600b35c968ceb79c95
|
[
"MIT"
] | null | null | null |
rhythms.py
|
gexahedron/sc3_microtonal
|
94c3091705d1e194dab0ec600b35c968ceb79c95
|
[
"MIT"
] | null | null | null |
# code from https://github.com/brianhouse/bjorklund/blob/master/__init__.py
def euclidean(steps, pulses):
steps = int(steps)
pulses = int(pulses)
if pulses > steps:
        raise ValueError('pulses cannot exceed steps')
pattern = []
counts = []
remainders = []
divisor = steps - pulses
remainders.append(pulses)
level = 0
while True:
counts.append(divisor // remainders[level])
remainders.append(divisor % remainders[level])
divisor = remainders[level]
level = level + 1
if remainders[level] <= 1:
break
counts.append(divisor)
def build(level):
if level == -1:
pattern.append(0)
elif level == -2:
pattern.append(1)
else:
for i in range(0, counts[level]):
build(level - 1)
if remainders[level] != 0:
build(level - 2)
build(level)
i = pattern.index(1)
pattern = pattern[i:] + pattern[0:i]
return pattern
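# A minimal usage sketch of euclidean() above:
#     >>> euclidean(8, 3)
#     [1, 0, 0, 1, 0, 0, 1, 0]
# i.e. three pulses distributed as evenly as possible over eight steps (the
# familiar E(3,8) "tresillo" pattern); euclidean(8, 9) raises ValueError
# because pulses may not exceed steps.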
| 28.277778
| 75
| 0.54224
|
2bf0d75bfe08b6d59f2fcc7f1386f0d5c2b338cc
| 17,552
|
py
|
Python
|
skdaccess/utilities/ode_util.py
|
samiriff/scikit-dataaccess-ode
|
dc08fd67c772d3cd83d0d34183196661b6b53778
|
[
"MIT"
] | null | null | null |
skdaccess/utilities/ode_util.py
|
samiriff/scikit-dataaccess-ode
|
dc08fd67c772d3cd83d0d34183196661b6b53778
|
[
"MIT"
] | null | null | null |
skdaccess/utilities/ode_util.py
|
samiriff/scikit-dataaccess-ode
|
dc08fd67c772d3cd83d0d34183196661b6b53778
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: Guillaume Rongier
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# 3rd party imports
import numpy as np
from xml.dom import minidom
from six.moves.urllib.request import urlopen
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
import sys
import re
def query_yes_no(question, default = "yes"):
'''
    Ask a yes/no question via input() and return the answer
Written by Trent Mick under the MIT license, see:
https://code.activestate.com/recipes/577058-query-yesno/
@param question: A string that is presented to the user
@param default: The presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user)
@return The "answer", i.e., either "yes" or "no"
'''
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
    if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
    while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number):
'''
Build the query URL using ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param query_type: File type to look for, i.e., Product, Browse, Derived, or Referenced
@param output: Return format for product queries or error messages, i.e, XML or JSON
@param results: Type of files to look for, i.e., c: count of products; o: ODE Product ID;
                    p: PDS product identifiers; m: product metadata; f: product files; b: browse image;
t: thumbnail image; l: complete PDS label; x: single product footprint
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@return Query URL
'''
ODE_REST_base_url = "http://oderest.rsl.wustl.edu/live2/?"
target = 'target=' + target
mission = '&ihid=' + mission
instrument = '&iid=' + instrument
product_type = '&pt=' + product_type
if western_lon is not None:
western_lon = '&westernlon=' + str(western_lon)
else:
western_lon = ''
if eastern_lon is not None:
eastern_lon = '&easternlon=' + str(eastern_lon)
else:
eastern_lon = ''
if min_lat is not None:
min_lat = '&minlat=' + str(min_lat)
else:
min_lat = ''
if max_lat is not None:
max_lat = '&maxlat=' + str(max_lat)
else:
max_lat = ''
if min_ob_time != '':
min_ob_time = '&mincreationtime=' + min_ob_time
if max_ob_time != '':
max_ob_time = '&maxcreationtime=' + max_ob_time
if product_id != '':
product_id = '&productid=' + product_id
if query_type != '':
query_type = '&query=' + query_type
if results != '':
results = '&results=' + results
if output != '':
output = '&output=' + output
if number_product_limit != '':
number_product_limit = '&limit=' + str(number_product_limit)
if result_offset_number != '':
result_offset_number = '&offset=' + str(result_offset_number)
# Concatenate the REST request
return ODE_REST_base_url + target + mission + instrument + product_type \
+ western_lon + eastern_lon + min_lat + max_lat + min_ob_time \
+ max_ob_time + query_type + results + output + number_product_limit \
+ result_offset_number + product_id
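# A minimal usage sketch of get_query_url() above; the parameter values are
# illustrative only and follow the examples given in the docstring:
#     url = get_query_url(target='Mars', mission='MRO', instrument='HIRISE',
#                         product_type='DTM',
#                         western_lon=None, eastern_lon=None,
#                         min_lat=None, max_lat=None,
#                         min_ob_time='', max_ob_time='', product_id='',
#                         query_type='product', output='XML', results='fp',
#                         number_product_limit=10, result_offset_number=0)
# The returned string is a plain ODE REST URL that can then be passed to
# get_files_urls() below.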
def get_files_urls(query_url, file_name = '*', print_info = False, limit_file_types = 'Product'):
'''
Retrieve the files' URLs based on a query from ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param query_url: URL resulting from the query of ODE
@param file_name: File name to look for, with wildcards (*) allowed
@param print_info: Print the files that will be downloaded
@param limit_file_types: Type of file type to select in results (Default: Product)
@return List of URLs
'''
url = urlopen(query_url)
query_results = url.read()
xml_results = minidom.parseString(query_results)
url.close()
error = xml_results.getElementsByTagName('Error')
if len(error) > 0:
print('\nError:', error[0].firstChild.data)
return None
    # translate shell-style '*' wildcards into a regular expression fragment
    file_name = file_name.replace('*', '.*')
products = xml_results.getElementsByTagName('Product')
file_urls = OrderedDict()
for product in products:
product_files = product.getElementsByTagName('Product_file')
product_id = product.getElementsByTagName('pdsid')[0]
if print_info == True:
print('\nProduct ID:', product_id.firstChild.data)
for product_file in product_files:
file_type = product_file.getElementsByTagName('Type')[0]
file_url = product_file.getElementsByTagName('URL')[0]
file_description = product_file.getElementsByTagName('Description')[0]
local_filename = file_url.firstChild.data.split('/')[-1]
local_file_extension = local_filename.split('.')[-1]
if limit_file_types == 'Browse':
if local_file_extension.endswith('JP2') or local_file_extension.endswith('IMG'):
continue
if re.search(file_name, local_filename) is not None:
# Restriction on the file type to download
if len(limit_file_types) > 0:
# If match, get the URL
if file_type.firstChild.data == limit_file_types:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
# No restriction on the file type to download
else:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
return file_urls
def query_files_urls(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id, file_name,
number_product_limit, result_offset_number, limit_file_types = 'Product'):
'''
Retrieve the URL locations based on a query using ODE REST interface
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param file_name: File name to look for, with wildcards (*) allowed
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@param limit_file_types: Type of file type to select in results (Default: Product)
@return List of URL locations
'''
# Returns a list of products with selected product metadata that meet the query parameters
query_type = 'product'
# Controls the return format for product queries or error messages
output = 'XML'
# For each product found return the product files and IDS
results = 'fp'
query_url = get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number)
print('Query URL:', query_url)
print('\nFiles that will be downloaded (if not previously downloaded):')
file_urls = get_files_urls(query_url, file_name, print_info = True, limit_file_types=limit_file_types)
if file_urls is None:
return OrderedDict()
elif len(file_urls) > 0:
should_continue = query_yes_no('\nDo you want to proceed?')
if should_continue == "no":
return OrderedDict()
else:
print('\nNo file found')
return file_urls
def correct_CRISM_label(label_file_location):
'''
Correct CRISM label file and allow GDAL to read it properly.
Necessary for Targeted Reduced Data Record (TRDR) data
Adapted from https://github.com/jlaura/crism/blob/master/csas.py
@param label_file_location: Local address of the current label
@return Local address of the new label
'''
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
new_label_file = open(new_label_file_location, 'w')
for line in open(label_file_location, 'r'):
if "OBJECT = FILE" in line:
line = "/* OBJECT = FILE */\n"
if "LINES" in line:
lines = int(line.split("=")[1])
if "LINE_SAMPLES" in line:
samples = int(line.split("=")[1])
new_label_file.write(line)
new_label_file.close()
return new_label_file_location
def correct_file_name_case_in_label(label_file_location, other_file_locations):
'''
Correct a label file if the case of the related data file(s) is incorrect
and GDAL cannot read it properly
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
label_file_name = '_'.join('.'.join(label_file_location.split('/')[-1].split('.')[:-1]).split('_')[:-1])
    insensitive_labels = []
    for file_location in other_file_locations:
        file_name = '.'.join(file_location.split('/')[-1].split('.')[:-1])
        if (file_location != label_file_location
                and file_name == label_file_name):
            insensitive_label = re.compile(re.escape(file_location.split('/')[-1]),
                                           re.IGNORECASE)
            insensitive_labels.append((insensitive_label,
                                       file_location.split('/')[-1]))
    with open(label_file_location, 'r') as file:
        label_file = file.read()
    for insensitive_label, sensitive_label in insensitive_labels:
        label_file = insensitive_label.sub(sensitive_label, label_file)
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
with open(new_label_file_location, 'w') as file:
file.write(label_file)
return new_label_file_location
def correct_label_file(label_file_location, other_file_locations = []):
'''
Correct a label file if GDAL cannot open the corresponding data file
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
# Correction not limited to CRISM data, in case other data had similar issues
new_label_file_location = correct_CRISM_label(label_file_location)
return correct_file_name_case_in_label(new_label_file_location,
other_file_locations)
def get_raster_array(gdal_raster, remove_ndv = True):
'''
Get a NumPy array from a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
    @param remove_ndv: Replace the no-data value as mentioned in the label with np.nan
@return The array
'''
assert gdal_raster is not None, 'No raster available'
number_of_bands = gdal_raster.RasterCount
    raster_array = gdal_raster.ReadAsArray().astype(float)
for i_band in range(number_of_bands):
raster_band = gdal_raster.GetRasterBand(i_band + 1)
no_data_value = raster_band.GetNoDataValue()
if no_data_value is not None and remove_ndv == True:
if number_of_bands > 1:
raster_array[i_band, :, :][raster_array[i_band, :, :] == no_data_value] = np.nan
else:
raster_array[raster_array == no_data_value] = np.nan
scale = raster_band.GetScale()
if scale is None:
scale = 1.
offset = raster_band.GetOffset()
if offset is None:
offset = 0.
if number_of_bands > 1:
raster_array[i_band, :, :] = raster_array[i_band, :, :]*scale + offset
else:
raster_array = raster_array*scale + offset
return raster_array
def get_raster_extent(gdal_raster):
'''
Get the extent of a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
@return The raster extent
'''
assert gdal_raster is not None, 'No raster available'
raster_x_size = gdal_raster.RasterXSize
raster_y_size = gdal_raster.RasterYSize
geotransform = gdal_raster.GetGeoTransform()
xmin = geotransform[0]
ymax = geotransform[3]
xmax = xmin + geotransform[1]*raster_x_size
ymin = ymax + geotransform[5]*raster_y_size
return (xmin, xmax, ymin, ymax)
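# A minimal usage sketch of the two raster helpers above; the file name is a
# placeholder for any locally downloaded product readable by GDAL:
#     raster = gdal.Open('some_local_product.LBL')
#     if raster is not None:
#         data = get_raster_array(raster)          # float array, NaN where no-data
#         xmin, xmax, ymin, ymax = get_raster_extent(raster)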
| 44.100503
| 108
| 0.644599
|
09b498b5000bf12d1e8ee13d3533595dcc25b1b1
| 87,675
|
py
|
Python
|
sympy/utilities/runtests.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 2
|
2019-10-18T12:45:34.000Z
|
2020-08-10T08:27:59.000Z
|
sympy/utilities/runtests.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 7
|
2015-03-23T23:33:02.000Z
|
2019-02-09T00:19:41.000Z
|
sympy/utilities/runtests.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-18T12:39:41.000Z
|
2019-10-18T12:39:41.000Z
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
import tempfile
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range, unwrap
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
# empirically generated list of the proportion of time spent running
# an even split of tests. This should periodically be regenerated.
# A list of [.6, .1, .3] would mean that if the tests are evenly split
# into '1/3', '2/3', '3/3', the first split would take 60% of the time,
# the second 10% and the third 30%. These lists are normalized to sum
# to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3].
#
# This list can be generated with the code:
# from time import time
# import sympy
#
# delays, num_splits = [], 30
# for i in range(1, num_splits + 1):
# tic = time()
# sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False) # Add slow=True for slow tests
# delays.append(time() - tic)
# tot = sum(delays)
# print([round(x / tot, 4) for x in delays])
SPLIT_DENSITY = [0.0801, 0.0099, 0.0429, 0.0103, 0.0122, 0.0055, 0.0533, 0.0191, 0.0977, 0.0878, 0.0026, 0.0028, 0.0147, 0.0118, 0.0358, 0.0063, 0.0026, 0.0351, 0.0084, 0.0027, 0.0158, 0.0156, 0.0024, 0.0416, 0.0566, 0.0425, 0.2123, 0.0042, 0.0099, 0.0576]
SPLIT_DENSITY_SLOW = [0.1525, 0.0342, 0.0092, 0.0004, 0.0005, 0.0005, 0.0379, 0.0353, 0.0637, 0.0801, 0.0005, 0.0004, 0.0133, 0.0021, 0.0098, 0.0108, 0.0005, 0.0076, 0.0005, 0.0004, 0.0056, 0.0093, 0.0005, 0.0264, 0.0051, 0.0956, 0.2983, 0.0005, 0.0005, 0.0981]
class Skipped(Exception):
pass
class TimeOutError(Exception):
pass
class DependencyError(Exception):
pass
# add more flags ??
future_flags = division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
# override reporter to maintain windows and python3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(os.path.normcase(rv))
return newlst
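# An illustrative note for convert_to_native_paths() above: on a POSIX system
# convert_to_native_paths(['sympy/core/tests']) returns the path unchanged
# (apart from os.path.normcase), while on Windows it becomes
# ['sympy\\core\\tests'] in lowercase.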
def get_sympy_dir():
"""
Returns the root sympy directory and set the global value
indicating whether the system is case sensitive or not.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
import sympy.interactive.printing as interactive_printing
# force pprint to be in ascii mode in doctests
use_unicode_prev = pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
# Prevent init_printing() in doctests from affecting other doctests
interactive_printing.NO_GLOBAL = True
return use_unicode_prev
def run_in_subprocess_with_hash_randomization(
function, function_args=(),
function_kwargs=None, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
function_kwargs = function_kwargs or {}
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs=None,
doctest_args=(), doctest_kwargs=None,
examples_args=(), examples_kwargs=None):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
test_kwargs = test_kwargs or {}
doctest_kwargs = doctest_kwargs or {}
examples_kwargs = examples_kwargs or {'quiet': True}
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if sys.platform != "win32" and not PY3 and os.path.exists("bin/test"):
# run Sage tests; Sage currently doesn't support Windows or Python 3
# Only run Sage tests if 'bin/test' is present (it is missing from
# our release because everything in the 'bin' directory gets
# installed).
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py",
shell=True, cwd=os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
The ``time_balance`` option can be passed in conjunction with ``split``.
If ``time_balance=True`` (the default for ``sympy.test``), sympy will attempt
to split the tests such that each split takes equal time. This heuristic
for balancing is based on pre-recorded test data.
>>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, string_types):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
fail_on_timeout = kwargs.get("fail_on_timeout", False)
if ON_TRAVIS and timeout is False:
# Travis times out if no activity is seen for 10 minutes.
timeout = 595
fail_on_timeout = True
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
time_balance = kwargs.get('time_balance', True)
blacklist = kwargs.get('blacklist', ['sympy/integrals/rubi/rubi_tests/tests'])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
density = None
if time_balance:
if slow:
density = SPLIT_DENSITY_SLOW
else:
density = SPLIT_DENSITY
if split:
matched = split_list(matched, split, density=density)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout, slow=slow,
enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout))
def doctest(*paths, **kwargs):
r"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _get_doctest_blacklist():
'''Get the default blacklist for the doctests'''
blacklist = []
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"doc/src/modules/physics/mechanics/autolev_parser.rst",
"sympy/physics/gaussopt.py", # raises deprecation warning
"sympy/galgebra.py", # raises ImportError
"sympy/this.py", # Prints text to the terminal
"sympy/matrices/densearith.py", # raises deprecation warning
"sympy/matrices/densesolve.py", # raises deprecation warning
"sympy/matrices/densetools.py", # raises deprecation warning
"sympy/physics/unitsystems.py", # raises deprecation warning
"sympy/parsing/autolev/_antlr/autolevlexer.py", # generated code
"sympy/parsing/autolev/_antlr/autolevparser.py", # generated code
"sympy/parsing/autolev/_antlr/autolevlistener.py", # generated code
"sympy/parsing/latex/_antlr/latexlexer.py", # generated code
"sympy/parsing/latex/_antlr/latexparser.py", # generated code
"sympy/integrals/rubi/rubi.py"
])
# autolev parser tests
num = 12
    for i in range(1, num + 1):
blacklist.append("sympy/parsing/autolev/test-examples/ruletest" + str(i) + ".py")
blacklist.extend(["sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py",
"sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py"])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# Use a non-windowed backend, so that the tests work on Travis
import matplotlib
matplotlib.use('Agg')
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend([
"sympy/printing/theanocode.py",
"doc/src/modules/numeric-computation.rst",
])
if import_module('antlr4') is None:
blacklist.extend([
"sympy/parsing/autolev/__init__.py",
"sympy/parsing/latex/_parse_latex_antlr.py",
])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
return blacklist
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
from sympy import pprint_use_unicode
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend(_get_doctest_blacklist())
# Use a non-windowed backend, so that the tests work on Travis
if import_module('matplotlib') is not None:
import matplotlib
matplotlib.use('Agg')
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Disable showing up of plots
from sympy.plotting.plot import unset_show
unset_show()
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
use_unicode_prev = setup_pprint()
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
# The NO_GLOBAL flag overrides the no_global flag to init_printing
# if True
import sympy.interactive.printing as interactive_printing
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
# get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split, density=None):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
`density` may be specified as a list. If specified,
tests will be balanced so that each split has as equal-as-possible
amount of mass according to `density`.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
if not density:
return l[(i - 1)*len(l)//t : i*len(l)//t]
# normalize density
tot = sum(density)
density = [x / tot for x in density]
def density_inv(x):
"""Interpolate the inverse to the cumulative
distribution function given by density"""
if x <= 0:
return 0
if x >= sum(density):
return 1
# find the first time the cumulative sum surpasses x
# and linearly interpolate
cumm = 0
for i, d in enumerate(density):
cumm += d
if cumm >= x:
break
frac = (d - (cumm - x)) / d
return (i + frac) / len(density)
lower_frac = density_inv((i - 1) / t)
higher_frac = density_inv(i / t)
return l[int(lower_frac*len(l)) : int(higher_frac*len(l))]
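# A hypothetical illustration of the ``density`` balancing above (values are
# made up for demonstration, not taken from the real test suite): with
# density=[1]*9 + [10] the last item carries most of the mass, so the first
# half of the split takes the nine light items and the second half only the
# heavy one:
#
#     split_list(list(range(10)), '1/2', density=[1]*9 + [10])  # -> [0, 1, 2, 3, 4, 5, 6, 7, 8]
#     split_list(list(range(10)), '2/2', density=[1]*9 + [10])  # -> [9]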
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
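# A minimal usage sketch for ``sympytestfile`` (the path and option flags here
# are illustrative assumptions, not a prescribed invocation):
#
#     failed, attempted = sympytestfile(
#         'doc/src/tutorial/intro.rst', module_relative=False,
#         encoding='utf-8', optionflags=pdoctest.ELLIPSIS)
#     # ``failed`` and ``attempted`` come from the SymPyTestResults namedtuple.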
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 5
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow,
enhance_asserts, fail_on_timeout)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
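# Roughly, the AST rewrite above turns an assertion such as
#
#     assert f(x) == g(x)
#
# into the equivalent of
#
#     _0, _1 = f(x), g(x)
#     assert _0 == _1, "\n%s ==\n%s" % (_0, _1)
#
# so a failing assert reports the values that were compared (a sketch only; the
# exact names and message come from the Transform visitor above).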
def test_file(self, filename, sort=True, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec", flags=0, dont_inherit=True)
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
except Exception:
reporter.test_exception(sys.exc_info())
clear_cache()
self._count += 1
random.seed(self._seed)
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# We have to be careful about decorated functions. As long as
# the decorator uses functools.wraps, we can detect it.
funcs = []
for f in gl:
if (f.startswith("test_") and (inspect.isfunction(gl[f])
or inspect.ismethod(gl[f]))):
func = gl[f]
# Handle multiple decorators
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if inspect.getsourcefile(func) == filename:
funcs.append(gl[f])
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if inspect.isgeneratorfunction(funcs[i]):
# some tests can be generators that return the actual
# test functions. We unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout, fail_on_timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout, fail_on_timeout):
def callback(x, y):
signal.alarm(0)
if fail_on_timeout:
raise TimeOutError("Timed out after %d seconds" % timeout)
else:
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([os.path.normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
import sympy.interactive.printing as interactive_printing
from sympy import pprint_use_unicode
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# so we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted alphabetically by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
if self._reporter._verbose:
self._reporter.write("\n{} ".format(test.name))
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
try:
self._check_dependencies(**test.globs['_doctest_depends_on'])
except DependencyError as e:
self._reporter.test_skip(v=str(e))
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
# they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
old_displayhook = sys.displayhook
use_unicode_prev = setup_pprint()
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
sys.displayhook = old_displayhook
interactive_printing.NO_GLOBAL = False
pprint_use_unicode(use_unicode_prev)
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
r"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [os.path.normcase(gi) for gi in g]
def _check_dependencies(self,
executables=(),
modules=(),
disable_viewers=(),
python_version=(2,)):
"""
Checks if the dependencies for the test are installed.
Raises ``DependencyError`` if at least one dependency is not installed.
"""
for executable in executables:
if not find_executable(executable):
raise DependencyError("Could not find %s" % executable)
for module in modules:
if module == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is None:
raise DependencyError("Could not import matplotlib")
else:
if not import_module(module):
raise DependencyError("Could not import %s" % module)
if disable_viewers:
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
vw = ('#!/usr/bin/env {}\n'
'import sys\n'
'if len(sys.argv) <= 1:\n'
' exit("wrong number of args")\n').format(
'python3' if PY3 else 'python')
for viewer in disable_viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if python_version:
if sys.version_info < python_version:
raise DependencyError("Requires Python >= " + '.'.join(map(str, python_version)))
if 'pyglet' in modules:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit = True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version to look harder for code that
appears to come from a different module. For example, the @vectorize
decorator makes it look like functions come from multidimensional.py
even though their code exists elsewhere.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests for functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(unwrap(val)) or
inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
# Make sure we don't run doctests for functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall(r"line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
# NOTE: this is not the exact line number but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
obj = unwrap(obj)
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
globs['_doctest_depends_on'] = getattr(obj, '_doctest_depends_on', {})
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib, our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples.
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
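# For example (illustrative strings): with want='1.0 2.0' and got='0.9999999999 2.0'
# the parsed floats differ by less than 1e-5, so the substitution above rewrites
# got to '1.0 2.0' and the comparison succeeds even though the raw strings differ.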
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
self._active_file = ''
self._active_f = None
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints text to the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# stdout is not a terminal; this happens, for example, if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
# remove the first item, as that is always runtests.py
tb = tb.tb_next
t = traceback.format_exception(e, val, tb)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
numpy = import_module('numpy')
self.write("numpy: %s\n" % (None if not numpy else numpy.__version__))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
if self._verbose:
if v is not None:
self.write(message + ' ', "Blue")
else:
self.write(" - ", "Blue")
self.write(char, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
if exc_info[0] is TimeOutError:
self.write("T", "Red")
else:
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
| 37.308511 | 261 | 0.579093 |
ff498abac2208a612be5549f3d8883b9d48d6f03 | 1,075 | py | Python | src/activities_score/migrations/0004_auto_20201223_2212.py | andhrelja/timetabled | 3d2d5ca91abc0b4cace4176d3b6849969f7211b5 | ["PostgreSQL"] | 1 | 2021-05-07T00:20:42.000Z | 2021-05-07T00:20:42.000Z | src/activities_score/migrations/0004_auto_20201223_2212.py | andhrelja/timetabled | 3d2d5ca91abc0b4cace4176d3b6849969f7211b5 | ["PostgreSQL"] | null | null | null | src/activities_score/migrations/0004_auto_20201223_2212.py | andhrelja/timetabled | 3d2d5ca91abc0b4cace4176d3b6849969f7211b5 | ["PostgreSQL"] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-23 21:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activities_score', '0003_auto_20201217_1831'),
]
operations = [
migrations.AlterField(
model_name='globalscoreactivity',
name='type',
field=models.IntegerField(choices=[(0, 'Kolokvij'), (1, 'Seminar'), (2, 'Esej'), (3, 'Prezentacija'), (4, 'Domaća zadaća'), (5, 'Laboratorijske vježbe'), (6, 'Završni ispit'), (7, 'Projekt'), (8, 'Rasprava'), (9, 'Timski rad'), (10, 'Kviz'), (11, 'Ostalo')], verbose_name='Tip aktivnosti'),
),
migrations.AlterField(
model_name='studentscoreactivity',
name='type',
field=models.IntegerField(choices=[(0, 'Kolokvij'), (1, 'Seminar'), (2, 'Esej'), (3, 'Prezentacija'), (4, 'Domaća zadaća'), (5, 'Laboratorijske vježbe'), (6, 'Završni ispit'), (7, 'Projekt'), (8, 'Rasprava'), (9, 'Timski rad'), (10, 'Kviz'), (11, 'Ostalo')], verbose_name='Tip aktivnosti'),
),
]
| 44.791667 | 302 | 0.588837 |
231ee94449938de07724a35653ac4f9f5cbf2d3a | 4,569 | py | Python | api.py | stemid/passwordfrank | cf0b16fd00ae850d93b513c4c2e3d45d5d324cec | ["CC0-1.0"] | null | null | null | api.py | stemid/passwordfrank | cf0b16fd00ae850d93b513c4c2e3d45d5d324cec | ["CC0-1.0"] | null | null | null | api.py | stemid/passwordfrank | cf0b16fd00ae850d93b513c4c2e3d45d5d324cec | ["CC0-1.0"] | null | null | null |
from datetime import datetime, timedelta
from uuid import uuid4
import json
import web
import settings, model
from settings import generate_password, base36encode, base36decode
# Helper function for formatting datetime objects to json
def dateHandler(obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M')
return None
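# Illustrative use of the handler (values are made up): passing it as the
# ``default`` argument lets json.dumps serialize datetimes, e.g.
# json.dumps({'created': datetime(2021, 1, 1, 12, 30)}, default=dateHandler)
# yields '{"created": "2021-01-01 12:30"}'.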
class password:
# GET /password/foo HTTP/1.0
def GET(self, arg=None):
# Get query params
query = web.input(
bits = 6,
words = 5
)
# Change output to JSON
web.header('Content-type', 'application/json')
# If no pattern at the end of the url,
# we will generate a random password
if not arg:
try:
words = model.get_words(results = 2**int(query.bits))
# Convert iterator
wordlist = []
for word in words:
wordlist.append(word.word)
except Exception as e:
web.internalerror(str(e))
raise
try:
generatedPass = generate_password(
int(query.words), wordlist
)
except Exception as e:
web.internalerror(str(e))
raise
web.ok()
return json.dumps(dict(phrase=generatedPass))
# Request for a pre-existing phrase
phraseCode = base36decode(arg)
try:
phrase = model.get_phrase(phraseCode)
seqID = phrase.get('id')
except model.ModelError as e:
web.notfound()
return json.dumps(dict(error='not found'))
except Exception as e:
web.internalerror(str(e))
raise
# Update number of views
try:
model.update_phrase(seqID)
except Exception as e:
web.internalerror(str(e))
raise
# Get results from row
results = {}
results['phrase'] = phrase.get('phrase', None)
results['code'] = phrase.get('code')
results['created'] = phrase.get('created', None)
results['maxdays'] = phrase.get('maxdays', 10)
results['maxviews'] = phrase.get('maxviews', 10)
results['views'] = phrase.get('views', 1)+1
# Calculate if maxviews or maxdays has been reached
deleteDate = results['created'] + timedelta(results['maxdays'])
today = datetime.now()
if today >= deleteDate or results['views'] >= results['maxviews']:
try:
deleted = model.delete_phrase(seqID)
except Exception as e:
web.internalerror(str(e))
raise
# Return results to client
web.ok()
return json.dumps(results, default=dateHandler)
# POST /password HTTP/1.0
def POST(self):
# Receive the passphrase through query params
query = web.input(
password = None,
maxdays = 10,
maxviews = 10
)
# Change output to JSON
web.header('Content-type', 'application/json')
# Generate unique code for phrase
uuid = uuid4()
phraseCode = str(uuid).split('-')[0]
try:
phraseid = model.add_phrase(
phrase = query.password,
code = base36decode(phraseCode),
maxdays = int(query.maxdays),
maxviews = int(query.maxviews)
)
except model.ModelError as e:
web.internalerror(str(e))
return json.dumps(dict(error=str(e)))
except Exception as e:
web.internalerror(str(e))
return json.dumps(dict(error=str(e)))
web.created()
return json.dumps(dict(
phrase = query.password,
code = phraseCode
))
# DELETE /password/foo HTTP/1.0
def DELETE(self, arg):
# Change output to JSON
web.header('Content-type', 'application/json')
if not arg:
web.internalerror()
return json.dumps(dict(error='must have code'))
try:
phrase = model.get_phrase(code=base36decode(arg))
seqID = phrase.get('id')
except Exception as e:
web.notfound()
return json.dumps(dict(error='not found'))
try:
model.delete_phrase(seqID)
except Exception as e:
web.internalerror(str(e))
return json.dumps(dict(error=str(e)))
web.ok()
return json.dumps(dict(status='%s deleted' % arg))
| 29.668831 | 74 | 0.531188 |
5ce083583c2bd7a235adb862be086f7241de28f6 | 33,270 | py | Python | pytext/config/config_adapter.py | yinghai/pytext | 5457c157d7a5f39bb96e2f207560cc52d9b98c83 | ["BSD-3-Clause"] | null | null | null | pytext/config/config_adapter.py | yinghai/pytext | 5457c157d7a5f39bb96e2f207560cc52d9b98c83 | ["BSD-3-Clause"] | null | null | null | pytext/config/config_adapter.py | yinghai/pytext | 5457c157d7a5f39bb96e2f207560cc52d9b98c83 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pytext.common.utils import eprint
from .pytext_config import LATEST_VERSION, PyTextConfig
ADAPTERS = {}
DOWNGRADE_ADAPTERS = {}
NOT_THERE = (None, None, None)
def register_adapter(from_version):
def decorator(fn):
if from_version in ADAPTERS:
raise Exception(
"Duplicated adapter from_version={}: '{}' and '{}'".format(
from_version, fn.__name__, ADAPTERS[from_version].__name__
)
)
else:
ADAPTERS[from_version] = fn
return fn
return decorator
def register_down_grade_adapter(from_version):
def decorator(fn):
if from_version in DOWNGRADE_ADAPTERS:
raise Exception(
"Duplicated adapter from_version={}: '{}' and '{}'".format(
from_version, fn.__name__, DOWNGRADE_ADAPTERS[from_version].__name__
)
)
else:
DOWNGRADE_ADAPTERS[from_version] = fn
return fn
return decorator
def find_dicts_containing_key(json_config, key):
if key in json_config:
yield json_config
for _, v in json_config.items():
if hasattr(v, "__contains__") and hasattr(v, "items"):
yield from find_dicts_containing_key(v, key)
def rename(json_config, old_name, new_name):
for section in find_dicts_containing_key(json_config, old_name):
value = section.pop(old_name)
if new_name:
section[new_name] = value
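# e.g. (hypothetical key names) rename(json_config, "dropout_ratio", "dropout")
# renames the key in every nested dict that contains it, while passing a falsy
# new_name simply removes it.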
def is_type_specifier(json_dict):
"""If a config object is a class, it might have a level which is a type specifier,
with one key corresponding to the name of whichever type it is. These types should
not be explicitly named in the path."""
# heuristic: one key, starting with uppercase character
if len(json_dict) != 1:
return False
key = next(iter(json_dict))
return key[0] == key[0].upper()
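# For instance (illustrative values), {"Adam": {"lr": 0.001}} is treated as a
# type specifier (single key, capitalized), whereas {"lr": 0.001} is not.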
def find_parameter(config, path_str):
# Recursively find path elements, skipping into type specifiers.
# Return the value and its container so the value can be deleted.
path = path_str.split(".")
value = config
container = None
for segment in path:
while is_type_specifier(value):
container, value = value, next(iter(value.values()))
if segment not in value:
return NOT_THERE
container, value = value, value[segment]
return path[-1], container, value
def _create_path(config, path):
# Recursively find path elements, skipping into type specifiers.
# If any container isn't there, create a new empty object for it.
# New containers are only created when a parameter is actually being set.
value = config
for segment in path:
while is_type_specifier(value):
value = next(iter(value.values()))
if segment not in value:
value[segment] = {}
value = value[segment]
while is_type_specifier(value):
value = next(iter(value.values()))
return value
def create_parameter(config, path_str, value):
*path, param = path_str.split(".")
new_container = _create_path(config, path)
new_container[param] = value
def delete_parameter(config, path_str):
param_name, container, _ = find_parameter(config, path_str)
if container:
container.pop(param_name, None)
def rename_parameter(config, old_path, new_path, transform=lambda x: x):
"""A powerful tool for writing config adapters, this allows you to specify
a JSON-style path for an old and new config parameter. For instance
rename_parameter(config, "task.data.epoch_size", "task.trainer.batches_per_epoch")
will look through the config for task.data.epoch_size, including moving through
explicitly specified types. If it's specified, it will delete the value and
set it in task.trainer.batches_per_epoch instead, creating trainer as an empty
dictionary if necessary."""
found = find_parameter(config, old_path)
if found is not NOT_THERE:
param_name, container, old_value = found
# Delete old value
container.pop(param_name)
# Update new value
create_parameter(config, new_path, transform(old_value))
return config
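# The optional ``transform`` lets an adapter convert the value while moving it,
# e.g. (hypothetical parameter names):
#
#     rename_parameter(config, "task.data.max_len",
#                      "task.model.max_seq_len", transform=int)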
@register_adapter(from_version=0)
def v0_to_v1(json_config):
# migrate optimizer and random_seed params
[task] = json_config["task"].values()
if (
"optimizer" not in task
or "Adam" in task["optimizer"]
or "SGD" in task["optimizer"]
or "NAG" in task["optimizer"]
) and ("trainer" not in task or "random_seed" not in task["trainer"]):
return json_config
if "trainer" in task and "random_seed" in task["trainer"]:
json_config["random_seed"] = task["trainer"]["random_seed"]
del task["trainer"]["random_seed"]
if "optimizer" in task and not any(
opt in task["optimizer"] for opt in ["Adam", "SGD", "NAG"]
):
op_type = task["optimizer"].get("type", "adam")
if op_type == "adam":
op_config = {"Adam": {}}
for key in ["lr", "weight_decay"]:
if key in task["optimizer"]:
op_config["Adam"][key] = task["optimizer"][key]
elif op_type == "sgd":
op_config = {"SGD": {}}
for key in ["lr", "momentum"]:
if key in task["optimizer"]:
op_config["SGD"][key] = task["optimizer"][key]
elif op_type == "nag":
op_config = {"NAG": {}}
for key in ["lr", "weight_decay", "momentum"]:
if key in task["optimizer"]:
op_config["NAG"][key] = task["optimizer"][key]
else:
raise ValueError("Migration not supported for your optimizer")
task["optimizer"] = op_config
return json_config
@register_adapter(from_version=1)
def v1_to_v2(json_config):
# migrate optimizer params
[task] = json_config["task"].values()
if (
"scheduler" not in task
or task["scheduler"] is None
or task["scheduler"].get("type") is None
):
return json_config
op_type = task["scheduler"].get("type")
if op_type == "step_lr":
op_config = {"StepLR": {}}
for key in ["step_size", "gamma"]:
if key in task["scheduler"]:
op_config["StepLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "lm_fine_tuning":
op_config = {"LmFineTuning": {}}
for key in [
"cut_frac",
"ratio",
"non_pretrained_param_groups",
"lm_lr_multiplier",
"lm_use_per_layer_lr",
"lm_gradual_unfreezing",
"last_epoch",
]:
if key in task["scheduler"]:
op_config["LmFineTuning"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "reduce_lr_on_plateau":
op_config = {"ReduceLROnPlateau": {}}
for key in [
"lower_is_better",
"factor",
"patience",
"min_lr",
"threshold",
"threshold_is_absolute",
"cooldown",
]:
if key in task["scheduler"]:
op_config["ReduceLROnPlateau"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "cosine_annealing_lr":
op_config = {"CosineAnnealingLR": {}}
for key in ["t_max", "eta_min"]:
if key in task["scheduler"]:
op_config["CosineAnnealingLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "exponential_lr":
op_config = {"ExponentialLR": {}}
for key in ["gamma"]:
if key in task["scheduler"]:
op_config["ExponentialLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "none":
del task["scheduler"]
else:
raise ValueError("Migration for your scheduler %s not supported." % op_type)
return json_config
@register_adapter(from_version=2)
def v2_to_v3(json_config):
"""Optimizer and Scheduler configs used to be part of the task config,
they now live in the trainer's config.
"""
[task] = json_config["task"].values()
for section_str in ["optimizer", "scheduler"]:
if section_str in task:
if "trainer" not in task:
task["trainer"] = {}
trainer = task["trainer"]
# a hack to support an older hack:
# some tasks like ensemble have a 'real_trainer' section inside trainer
# that has the actual trainer config
if "real_trainer" in trainer:
real_trainer = trainer["real_trainer"]
real_trainer[section_str] = task[section_str]
else:
trainer[section_str] = task[section_str]
# remove from task config
task.pop(section_str)
return json_config
@register_adapter(from_version=3)
def v3_to_v4(json_config):
"""Key for provding the path for contextual token embedding has changed from
`pretrained_model_embedding` to `contextual_token_embedding. This affects the
`features` section of the config.
"""
[task] = json_config["task"].values()
old_key = "pretrained_model_embedding"
new_key = "contextual_token_embedding"
for section_str in ["features", "labels"]:
if section_str in task:
section = task[section_str]
if section and old_key in section:
section[new_key] = section[old_key]
section.pop(old_key)
return json_config
def deprecate(json_config, t):
for section in find_dicts_containing_key(json_config, t):
section[t + "_Deprecated"] = section.pop(t)
@register_adapter(from_version=4)
def doc_model_deprecated(json_config):
"""Rename DocModel to DocModel_Deprecated."""
deprecate(json_config, "DocModel")
return json_config
@register_adapter(from_version=5)
def old_tasks_deprecated(json_config):
"""
Rename tasks with data_handler config to _Deprecated
"""
deprecate(json_config, "BertClassificationTask")
deprecate(json_config, "BertPairClassificationTask")
deprecate(json_config, "BertPairwiseClassificationTask")
deprecate(json_config, "COLMClassifyTask")
deprecate(json_config, "ContextSCLSTMCompositionalTask")
deprecate(json_config, "DocClassificationTask")
deprecate(json_config, "ElmoDocClassificationTask")
deprecate(json_config, "ElmoFineTunePairwiseClassificationTask")
deprecate(json_config, "EnsembleTask")
deprecate(json_config, "FederatedLearningTaskBase")
deprecate(json_config, "FLDocClassificationTask")
deprecate(json_config, "FLQueryDocumentPairwiseRankingTask")
deprecate(json_config, "KDDocClassificationTask")
deprecate(json_config, "LMTask")
deprecate(json_config, "QueryDocumentPairwiseRankingTask")
deprecate(json_config, "SCLSTMCompositionalTask")
deprecate(json_config, "SCLSTMTask")
deprecate(json_config, "SemanticParsingCppTask")
deprecate(json_config, "SemanticParsingTask")
deprecate(json_config, "Seq2SeqTask")
deprecate(json_config, "Seq2SeqCompositionalMetricReporter")
deprecate(json_config, "Seq2SeqMetricReporter")
deprecate(json_config, "RNNEncoderDecoder")
deprecate(json_config, "SeqNNTask")
deprecate(json_config, "SGNNClassificationTask")
deprecate(json_config, "ShallowClassificationTask")
deprecate(json_config, "ShallowTaggingTask")
deprecate(json_config, "SpanClassificationTask")
deprecate(json_config, "TreeParserTask")
return json_config
@register_adapter(from_version=6)
def v6_to_v7(json_config):
"""
Make `LabelTensorizer` expansible. If the `labels` field should be an instance of
    `LabelTensorizer`, convert it to `{LabelTensorizer: labels}`.
"""
[(task_name, task)] = json_config["task"].items()
if task_name in (
"BertPairRegressionTask",
"NewDocumentRegression",
"NewWordTaggingTask",
):
# Task has a label tensorizer different from LabelTensorizer.
return json_config
model = task.get("model")
if not model:
return json_config
model_name = None
if "inputs" in model:
inputs = model["inputs"]
elif len(model) == 1:
[(model_name, model_val)] = model.items()
inputs = model_val.get("inputs")
else:
inputs = None
if not inputs:
return json_config
if model_name in (
"NewBertRegressionModel",
"DocRegressionModel",
"NewWordTaggingModel",
"ELModel",
"EntitySalienceModel",
"MatchaTwoTowerModel",
):
# Model has a label tensorizer different from LabelTensorizer.
return json_config
labels = inputs.get("labels")
if labels is None:
return json_config
inputs["labels"] = {"LabelTensorizer": labels}
return json_config
@register_adapter(from_version=7)
def lm_model_deprecated(json_config):
"""
Rename LM model to _Deprecated (LMTask is already deprecated in v5)
"""
deprecate(json_config, "LMLSTM")
return json_config
@register_adapter(from_version=8)
def new_tasks_rename(json_config):
"""
Rename tasks with new API consistently
"""
# Deprecated
rename(
json_config,
"QueryDocumentPairwiseRankingModel",
"QueryDocumentPairwiseRankingModel_Deprecated",
)
# New
rename(json_config, "NewDocModel", "DocModel")
rename(json_config, "NewDocRegressionModel", "DocRegressionModel")
rename(json_config, "NewDocumentClassification", "DocumentClassificationTask")
rename(json_config, "NewDocumentRegression", "DocumentRegressionTask")
rename(
json_config,
"NewQueryDocumentPairwiseRankingModel",
"QueryDocPairwiseRankingModel",
)
rename(json_config, "NewWordTaggingModel", "WordTaggingModel")
rename(json_config, "NewWordTaggingTask", "WordTaggingTask")
rename(json_config, "PairwiseClassification", "PairwiseClassificationTask")
rename(
json_config, "QueryDocumentPairwiseRanking", "QueryDocumentPairwiseRankingTask"
)
return json_config
@register_adapter(from_version=9)
def move_epoch_size(json_config):
return rename_parameter(
json_config, "task.data.epoch_size", "task.trainer.num_batches_per_epoch"
)
@register_adapter(from_version=10)
def ensemble_task_deprecated(json_config):
"""
Rename tasks with new API consistently
"""
# Deprecated
deprecate(json_config, "BaggingDocEnsemble")
deprecate(json_config, "BaggingIntentSlotEnsemble")
deprecate(json_config, "EnsembleTrainer")
return json_config
@register_adapter(from_version=11)
def rename_bitransformer_inputs(json_config):
"""
In "BiTransformer" model, rename input "characters" -> "bytes" and update subfields.
"""
[task] = json_config["task"].values()
model = task.get("model")
if model and len(model) == 1 and "BiTransformer" in model:
model_val = list(model.values())[0]
if "inputs" not in model_val:
model_val["inputs"] = {}
inputs = model_val["inputs"]
char_config = inputs.pop("characters", {})
if "max_char_length" in char_config:
char_config["max_byte_len"] = char_config.pop("max_char_length")
char_config["offset_for_non_padding"] = 1
model_val["inputs"]["bytes"] = char_config
return json_config
@register_adapter(from_version=12)
def v12_to_v13(json_config):
"""remove_output_encoded_layers(json_config)"""
rename(json_config, "output_encoded_layers", None)
"""
Make 'ClassificationMetricReporter'
expansible.
If the 'metric_reporter' field should be an instance of
'ClassificationMetricReporter',
convert it to '{ClassificationMetricReporter: metric_reporter}'.
"""
[(task_name, task)] = json_config["task"].items()
if task_name not in (
"EnsembleTask",
"DocClassificationTask_Deprecated",
"DocumentClassificationTask",
"PairwiseClassificationTask",
"SeqNNTask",
"ShallowClassificationTask_Deprecated",
"KDDocClassificationTask_Deprecated",
"XLMDocumentClassification",
"XLMPairClassification",
"NewBertClassificationTask",
"NewBertPairClassificationTask",
"LaserClassificationTask",
):
# Task has a metric reporter different from ClassificationMetricReporter
return json_config
metric_reporter = task.get("metric_reporter")
if metric_reporter is None:
return json_config
keys = list(metric_reporter.keys())
if keys == []:
return json_config
set = {"output_path", "model_select_metric", "target_label", "text_column_names"}
if keys[0] in set:
task["metric_reporter"] = {"ClassificationMetricReporter": metric_reporter}
else:
return json_config
return json_config
@register_adapter(from_version=13)
def rename_tensorizer_vocab_params(json_config):
[(task_name, task)] = json_config["task"].items()
# XLM and Bert models use the `vocab_file` field, but in a custom way. This
# field should not be migrated to `vocab.vocab_files` as for TokenTensorizer.
if "XLM" in task_name or "Bert" in task_name:
return json_config
def resolve_model(model_config):
if len(model_config) == 1 and list(model_config)[0][0].isupper():
[(model_name, model_config)] = model_config.items()
if "XLM" in model_name or "Bert" in model_name:
return {}
return model_config
model = resolve_model(task.get("model", {}))
if not model:
return json_config
def update_model_config(model_config):
model_config = resolve_model(model_config)
tokens = model_config.get("inputs", {}).get("tokens")
if not tokens:
return
vocab = {"build_from_data": tokens.pop("build_vocab", True), "vocab_files": []}
if "vocab_file" in tokens:
vocab["vocab_files"].append(
{
"filepath": tokens.pop("vocab_file"),
"size_limit": tokens.pop("vocab_file_size_limit", 0),
}
)
if "models" in model:
# ensemble model
for sub_model in model["models"]:
update_model_config(sub_model)
else:
update_model_config(model)
return json_config
@register_adapter(from_version=14)
def flatten_deprecated_ensemble_config(json_config):
# Deprecated ensemble is removed from codebase, so this is now just a no-op
return json_config
def migrate_to_new_data_handler(task, columns):
create_parameter(task, "data.source", {"TSVDataSource": {}})
rename_parameter(task, "data_handler.eval_path", "data.source.eval_filename")
rename_parameter(task, "data_handler.test_path", "data.source.test_filename")
rename_parameter(task, "data_handler.train_path", "data.source.train_filename")
columns_to_read = next(find_dicts_containing_key(task, "columns_to_read"), None)
if columns_to_read:
rename_parameter(
task, "data_handler.columns_to_read", "data.source.field_names"
)
else:
create_parameter(task, "data.source.field_names", columns)
rename_parameter(
task, "data_handler.append_bos", "model.inputs.tokens.add_bos_token"
)
rename_parameter(
task, "data_handler.append_eos", "model.inputs.tokens.add_eos_token"
)
rename_parameter(
task, "data_handler.max_seq_len", "model.inputs.tokens.max_seq_len"
)
rename_parameter(
task, "features.shared_module_key", "model.embedding.shared_module_key"
)
rename_parameter(task, "features.word_feat.embed_dim", "model.embedding.embed_dim")
rename_parameter(task, "features.dense_feat", "model.inputs.dense")
create_parameter(task, "data.batcher", {"PoolingBatcher": {}})
rename_parameter(
task, "data_handler.eval_batch_size", "data.batcher.eval_batch_size"
)
rename_parameter(
task, "data_handler.test_batch_size", "data.batcher.test_batch_size"
)
rename_parameter(
task, "data_handler.train_batch_size", "data.batcher.train_batch_size"
)
rename_parameter(
task,
"features.word_feat.vocab_size",
"model.inputs.tokens.vocab.size_from_data",
)
rename_parameter(
task,
"features.word_feat.vocab_from_train_data",
"model.inputs.tokens.vocab.build_from_data",
)
rename_parameter(
task,
"features.word_feat.vocab_file",
"model.inputs.tokens.vocab.vocab_files",
lambda x: [{"filepath": x}],
)
rename_parameter(task, "labels.label_weights", "model.output_layer.label_weights")
delete_parameter(task, "data_handler")
delete_parameter(task, "exporter")
delete_parameter(task, "features")
delete_parameter(task, "featurizer")
delete_parameter(task, "labels")
@register_adapter(from_version=15)
def remove_lmtask_deprecated(json_config):
for section in find_dicts_containing_key(json_config, "LMTask_Deprecated"):
task = section.pop("LMTask_Deprecated")
migrate_to_new_data_handler(task, ["text"])
section["LMTask"] = task
return json_config
@register_adapter(from_version=16)
def remove_docclassificationtask_deprecated(json_config):
for section in find_dicts_containing_key(
json_config, "DocClassificationTask_Deprecated"
):
task = section.pop("DocClassificationTask_Deprecated")
convert = next(find_dicts_containing_key(task, "convert_to_bytes"), None)
section["DocumentClassificationTask"] = task
migrate_to_new_data_handler(task, ["doc_label", "text"])
create_parameter(task, "model.inputs.labels.column", "doc_label")
# In DocumentClassificationTask.Config:
# model: BaseModel.Config = DocModel.Config()
# It will create a BaseModel if model class is implicit in json.
# We make it explicit to avoid errors.
for model in find_dicts_containing_key(section, "model"):
if next(iter(model["model"]))[0].islower():
model["model"] = {"DocModel": model.pop("model")}
if convert and convert["convert_to_bytes"]:
rename(section, "DocModel", "ByteTokensDocumentModel")
return json_config
@register_adapter(from_version=17)
def rename_fl_task(json_config):
# remove 'NewDoc' from FL task names
for trainer_suffix in ["SyncTrainer", "AsyncTrainer"]:
old_trainer_name = f"FLNewDoc{trainer_suffix}"
new_trainer_name = f"FL{trainer_suffix}"
for section in find_dicts_containing_key(json_config, old_trainer_name):
section[new_trainer_name] = section.pop(old_trainer_name)
return json_config
@register_adapter(from_version=18)
def upgrade_if_xlm(json_config):
"""
Make `XLMModel` Union changes for encoder and tokens config.
Since they are now unions, insert the old class into the config if
no class name is mentioned.
"""
_, _, model = find_parameter(json_config, "task.model")
if model and "XLMModel" in model:
_, inputs, tokens = find_parameter(json_config, "task.model.inputs.tokens")
if tokens and "XLMTensorizer" not in tokens:
inputs["tokens"] = {}
inputs["tokens"]["XLMTensorizer"] = tokens
return json_config
@register_adapter(from_version=19)
def fix_fl_local_optimizer_and_trainer(json_config):
"""a) Change FL local optimizer from optimizer:{SGD:{lr=0.1, momentum=0.2}}
to optimizer:{lr=0.1, momentum=0.2}
b) Replace trainer:{FLSyncTrainer:{foo}} by
trainer:{fl_trainer:{foo, type:SyncTrainer}}
Same for FLAsyncTrainer
"""
# only for tasks that contain FLSyncTrainer or FLAsyncTrainer
_, container, trainer = find_parameter(json_config, "task.trainer")
if not trainer:
return json_config
if "FLSyncTrainer" in trainer:
fl_trainer_name, fl_trainer_type = "FLSyncTrainer", "SyncTrainer"
elif "FLAsyncTrainer" in trainer:
fl_trainer_name, fl_trainer_type = "FLAsyncTrainer", "AsyncTrainer"
else:
return json_config
trainer_section = trainer.pop(fl_trainer_name)
# first, replace optimizer:{SGD:{lr=0.1, momentum=0.2}} by
# optimizer:{lr=0.1, momentum=0.2}
if "optimizer" in trainer_section:
optimizer = trainer_section.pop("optimizer")
sgd_config = optimizer.pop("SGD")
trainer_section["optimizer"] = sgd_config
# rename "global_optimizer" to "aggregator"
if "global_optimizer" in trainer_section:
aggregator = trainer_section.pop("global_optimizer")
trainer_section["aggregator"] = aggregator
trainer_section["type"] = fl_trainer_type
trainer["fl_trainer"] = trainer_section
return json_config
@register_adapter(from_version=20)
def upgrade_padding(json_config):
"""
Upgrade config option padding_control to seq_padding_control.
"""
json_config["seq_padding_control"] = json_config.pop("padding_control", None)
return json_config
@register_adapter(from_version=21)
def upgrade_export_config(json_config):
"""
Upgrade model export related config fields to the new "export" section.
"""
export_config_fields = [
"export_caffe2_path",
"export_onnx_path",
"export_torchscript_path",
"torchscript_quantize",
"accelerate",
"inference_interface",
"seq_padding_control",
"batch_padding_control",
"target",
]
export_config = {}
for f in export_config_fields:
if f in json_config:
export_config[f] = json_config.pop(f, None)
json_config["export"] = export_config
return json_config
@register_adapter(from_version=22)
def v22_to_v23(json_config):
"""
Upgrade by adding read_chunk_size option
"""
if "read_chunk_size" not in json_config:
json_config["read_chunk_size"] = PyTextConfig.read_chunk_size
return json_config
@register_adapter(from_version=23)
def v23_to_v24(json_config):
"""
No-op since export_list is optional
"""
return json_config
@register_adapter(from_version=24)
def v24_to_v25(json_config):
"""
Upgrade by adding max_input_text_length option and default to None
"""
for v in get_json_config_iterator(json_config, "SentencePieceTokenizer"):
if "max_input_text_length" not in v:
v["max_input_text_length"] = None
return json_config
@register_down_grade_adapter(from_version=23)
def v23_to_v22(json_config):
"""
    Downgrade by removing read_chunk_size option
"""
if "read_chunk_size" in json_config:
del json_config["read_chunk_size"]
return json_config
@register_down_grade_adapter(from_version=24)
def v24_to_v23(json_config):
"""
Downgrade by removing export_list option
"""
if "export_list" in json_config:
if len(json_config["export_list"]) > 1:
raise Exception(
"Current version does not support multiple exports in export_list"
)
elif len(json_config["export_list"]) == 0:
raise Exception("Current version does not support empty export_list")
json_config["export"] = json_config["export_list"][0]
del json_config["export_list"]
return json_config
@register_down_grade_adapter(from_version=25)
def v25_to_v24(json_config):
"""
Downgrade by removing max_input_text_length option for SentencePieceTokenizer
"""
for v in get_json_config_iterator(json_config, "SentencePieceTokenizer"):
if "max_input_text_length" in v:
del v["max_input_text_length"]
return json_config
def get_json_config_iterator(json_config, lookup_key):
for key, value in json_config.items():
if key == lookup_key:
yield value
elif isinstance(value, dict):
for v in get_json_config_iterator(value, lookup_key):
yield v
@register_down_grade_adapter(from_version=26)
def v26_to_v25(json_config):
"""
Downgrade by removing target option from all
exports in export_list
"""
if "export" in json_config:
if "target" in json_config["export"]:
json_config["export"].pop("target")
if "export_list" in json_config:
export_list = json_config["export_list"]
for export_cfg in export_list:
if "target" in export_cfg:
export_cfg.pop("target")
json_config["export_list"] = export_list
return json_config
@register_adapter(from_version=25)
def v25_to_v26(json_config):
if "export" in json_config:
export_cfg = json_config["export"]
export_cfg["target"] = get_name_from_options(export_cfg)
if "inference_interface" in export_cfg:
export_cfg.pop("inference_interface")
json_config["export"] = export_cfg
if "export_list" in json_config:
export_list = json_config["export_list"]
for export_cfg in export_list:
export_cfg["target"] = get_name_from_options(export_cfg)
if "inference_interface" in export_cfg:
export_cfg.pop("inference_interface")
json_config["export_list"] = export_list
return json_config
@register_down_grade_adapter(from_version=27)
def v27_to_v26(json_config):
"""
Downgrade by removing scaling option for RoBERTaEncoder
"""
for v in get_json_config_iterator(json_config, "RoBERTaEncoder"):
if "scaling" in v:
del v["scaling"]
return json_config
@register_adapter(from_version=26)
def v26_to_v27(json_config):
"""
No-op since scaling is optional
"""
return json_config
def get_name_from_options(export_config):
"""
Reverse engineer which model is which based on recognized
export configurations. If the export configurations don't adhere
to the set of recognized backends, then set target name to unknown
"""
if "accelerate" in export_config and len(export_config["accelerate"]) != 0:
if export_config["accelerate"][0] == "cuda:half":
tgt = "gpu-fp16"
elif (
export_config["accelerate"][0] == "nnpi"
and "seq_padding_control" in export_config
and "batch_padding_control" in export_config
):
tgt = "nnpi"
        else:
            # unrecognized accelerate option: fall back to "unknown"
            tgt = "unknown"
    elif (
        "seq_padding_control" in export_config
        and "batch_padding_control" in export_config
    ):
tgt = "nnpi"
else:
tgt = "unknown"
return tgt
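# Illustrative sketch (not part of the original module): how recognized export
# configurations map to target names. The dicts and padding values below are
# hypothetical examples.
def _example_get_name_from_options():
    assert get_name_from_options({"accelerate": ["cuda:half"]}) == "gpu-fp16"
    assert (
        get_name_from_options(
            {
                "accelerate": ["nnpi"],
                "seq_padding_control": [0, 32],
                "batch_padding_control": [0, 8],
            }
        )
        == "nnpi"
    )
    assert get_name_from_options({"export_onnx_path": "/tmp/model.onnx"}) == "unknown"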
def upgrade_one_version(json_config):
current_version = json_config.get("version", 0)
adapter = ADAPTERS.get(current_version)
if not adapter:
raise Exception(
f"no adapter found for version {current_version}."
"Make sure current revision is after pytext pkg's revision, and rebase if necessary"
)
json_config = adapter(json_config)
eprint(
f"WARNING - Applying old config adapter for version={current_version}. "
"Please consider migrating your old configs to the latest version."
)
json_config["version"] = current_version + 1
return json_config
def downgrade_one_version(json_config):
current_version = json_config.get("version", 0)
downgrade_adapter = DOWNGRADE_ADAPTERS.get(current_version)
if not downgrade_adapter:
raise Exception(f"no downgrade adapter found for version {current_version}")
json_config = downgrade_adapter(json_config)
eprint(
f"WARNING - Downgrading your current config version={current_version}. "
"Please wait for next pytext pkg release to let new config take effect."
)
json_config["version"] = current_version - 1
return json_config
def upgrade_to_latest(json_config):
current_version = json_config.get("version") or 0
if current_version > LATEST_VERSION:
raise Exception(
f"config version {json_config['version']} shouldn't exceed lastest \
version {LATEST_VERSION}"
)
while current_version != LATEST_VERSION:
print(f"Current Version: {current_version}")
json_config = upgrade_one_version(json_config)
current_version = json_config["version"]
return json_config
def update_to_version(json_config, expected_version=LATEST_VERSION):
current_version = json_config.get("version") or 0
if current_version > expected_version:
while current_version != expected_version:
print(f"Current Version: {current_version}")
json_config = downgrade_one_version(json_config)
current_version = json_config["version"]
while current_version != expected_version:
print(f"Current Version: {current_version}")
json_config = upgrade_one_version(json_config)
current_version = json_config["version"]
return json_config
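# Illustrative sketch (not part of the original module): applying a single registered
# adapter to step an old config forward one version. The config literal is
# hypothetical; only the "version" key matters for adapter dispatch.
def _example_upgrade_one_version():
    old_config = {"version": 22, "task": {"DocumentClassificationTask": {}}}
    new_config = upgrade_one_version(old_config)  # applies v22_to_v23
    assert new_config["version"] == 23
    assert "read_chunk_size" in new_config
    return new_config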
| 34.440994
| 96
| 0.666607
|
848debb5e53b87ef7c4b4628545b4f4c48f5c57a
| 26,980
|
py
|
Python
|
cinder/service.py
|
helenwalsh/cinder
|
307fccea4cc9c6496334b0fe137206cb48499bd5
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/service.py
|
BelieveInFuture/cinder
|
fff95fa6a68a054488ee087b6e31f4f5e28209dc
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/service.py
|
BelieveInFuture/cinder
|
fff95fa6a68a054488ee087b6e31f4f5e28209dc
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
import subprocess
import sys
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import service
from oslo_service import wsgi
from oslo_utils import importutils
osprofiler_initializer = importutils.try_import('osprofiler.initializer')
profiler = importutils.try_import('osprofiler.profiler')
profiler_opts = importutils.try_import('osprofiler.opts')
from cinder.common import constants
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields
from cinder import rpc
from cinder import version
from cinder.volume import volume_utils
if os.name == 'nt':
from os_win import utilsfactory as os_win_utilsfactory
else:
os_win_utilsfactory = None
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Interval, in seconds, between nodes reporting state '
'to datastore'),
cfg.IntOpt('periodic_interval',
default=60,
help='Interval, in seconds, between running periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range, in seconds, to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.StrOpt('osapi_volume_listen',
default="0.0.0.0",
help='IP address on which OpenStack Volume API listens'),
cfg.PortOpt('osapi_volume_listen_port',
default=8776,
help='Port on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_workers',
help='Number of workers for OpenStack Volume API service. '
'The default is equal to the number of CPUs available.'),
cfg.BoolOpt('osapi_volume_use_ssl',
default=False,
help='Wraps the socket in a SSL context if True is set. '
'A certificate file and key file must be specified.'), ]
CONF = cfg.CONF
CONF.register_opts(service_opts)
if profiler_opts:
profiler_opts.set_defaults(CONF)
def setup_profiler(binary, host):
if (osprofiler_initializer is None or
profiler is None or
profiler_opts is None):
LOG.debug('osprofiler is not present')
return
if CONF.profiler.enabled:
osprofiler_initializer.init_from_conf(
conf=CONF,
context=context.get_admin_context().to_dict(),
project="cinder",
service=binary,
host=host
)
LOG.warning(
"OSProfiler is enabled.\nIt means that person who knows "
"any of hmac_keys that are specified in "
"/etc/cinder/cinder.conf can trace his requests. \n"
"In real life only operator can read this file so there "
"is no security issue. Note that even if person can "
"trigger profiler, only admin user can retrieve trace "
"information.\n"
"To disable OSProfiler set in cinder.conf:\n"
"[profiler]\nenabled=false")
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table.
"""
# Make service_id a class attribute so it can be used for clean up
service_id = None
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, coordination=False, cluster=None, *args,
**kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.cluster = cluster
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
self.coordination = coordination
manager_class = importutils.import_class(self.manager_class_name)
if CONF.profiler.enabled:
manager_class = profiler.trace_cls("rpc")(manager_class)
self.service = None
self.manager = manager_class(host=self.host,
cluster=self.cluster,
service_name=service_name,
*args, **kwargs)
self.availability_zone = self.manager.availability_zone
# NOTE(geguileo): We need to create the Service DB entry before we
# create the manager, otherwise capped versions for serializer and rpc
# client would use existing DB entries not including us, which could
# result in us using None (if it's the first time the service is run)
# or an old version (if this is a normal upgrade of a single service).
ctxt = context.get_admin_context()
try:
service_ref = objects.Service.get_by_args(ctxt, host, binary)
service_ref.rpc_current_version = manager_class.RPC_API_VERSION
obj_version = objects_base.OBJ_VERSIONS.get_current()
service_ref.object_current_version = obj_version
# added_to_cluster attribute marks when we consider that we have
# just added a host to a cluster so we can include resources into
# that cluster. We consider that we have added the host when we
# didn't have data in the cluster DB field and our current
# configuration has a cluster value. We don't want to do anything
# automatic if the cluster is changed, in those cases we'll want
            # to use the cinder manage command and do it manually.
self.added_to_cluster = (not service_ref.cluster_name and cluster)
if service_ref.cluster_name != cluster:
LOG.info('This service has been moved from cluster '
'%(cluster_svc)s to %(cluster_cfg)s. Resources '
'will %(opt_no)sbe moved to the new cluster',
{'cluster_svc': service_ref.cluster_name,
'cluster_cfg': cluster,
'opt_no': '' if self.added_to_cluster else 'NOT '})
if self.added_to_cluster:
                # We copy the service's disable status to the cluster if we
                # have to create it.
self._ensure_cluster_exists(ctxt, service_ref)
service_ref.cluster_name = cluster
service_ref.save()
Service.service_id = service_ref.id
self.origin_service_id = service_ref.id
except exception.NotFound:
self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # Service entry didn't exist because it was manually removed
# or it's the first time running, to be on the safe side we say we
# were added if we are clustered.
self.added_to_cluster = bool(cluster)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
setup_profiler(binary, host)
self.rpcserver = None
self.backend_rpcserver = None
self.cluster_rpcserver = None
def start(self):
version_string = version.version_string()
LOG.info('Starting %(topic)s node (version %(version_string)s)',
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
if self.coordination:
coordination.COORDINATOR.start()
# NOTE(yikun): When re-spawning child process, we should set the class
# attribute back using the origin service_id, otherwise,
# the Service.service_id will be inherited from the parent process,
        # and will be recorded as the last started service id by mistake.
Service.service_id = self.origin_service_id
self.manager.init_host(added_to_cluster=self.added_to_cluster,
service_id=Service.service_id)
LOG.debug("Creating RPC server for service %s", self.topic)
ctxt = context.get_admin_context()
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
obj_version_cap = objects.Service.get_minimum_obj_version(ctxt)
LOG.debug("Pinning object versions for RPC server serializer to %s",
obj_version_cap)
serializer = objects_base.CinderObjectSerializer(obj_version_cap)
target = messaging.Target(topic=self.topic, server=self.host)
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
# NOTE(dulek): Kids, don't do that at home. We're relying here on
# oslo.messaging implementation details to keep backward compatibility
# with pre-Ocata services. This will not matter once we drop
# compatibility with them.
if self.topic == constants.VOLUME_TOPIC:
target = messaging.Target(
topic='%(topic)s.%(host)s' % {'topic': self.topic,
'host': self.host},
server=volume_utils.extract_host(self.host, 'host'))
self.backend_rpcserver = rpc.get_server(target, endpoints,
serializer)
self.backend_rpcserver.start()
if self.cluster:
LOG.info('Starting %(topic)s cluster %(cluster)s (version '
'%(version)s)',
{'topic': self.topic, 'version': version_string,
'cluster': self.cluster})
target = messaging.Target(
topic='%s.%s' % (self.topic, self.cluster),
server=volume_utils.extract_host(self.cluster, 'host'))
serializer = objects_base.CinderObjectSerializer(obj_version_cap)
self.cluster_rpcserver = rpc.get_server(target, endpoints,
serializer)
self.cluster_rpcserver.start()
self.manager.init_host_with_rpc()
if self.report_interval:
self.tg.add_timer(self.report_interval, self.report_state,
initial_delay=self.report_interval)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
self.tg.add_timer(self.periodic_interval, self.periodic_tasks,
initial_delay=initial_delay)
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
"Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s",
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _ensure_cluster_exists(self, context, service):
if self.cluster:
try:
cluster = objects.Cluster.get_by_id(context, None,
name=self.cluster,
binary=self.binary)
# If the cluster already exists, then the service replication
# fields must match those of the cluster unless the service
# is in error status.
error_states = (fields.ReplicationStatus.ERROR,
fields.ReplicationStatus.FAILOVER_ERROR)
if service.replication_status not in error_states:
for attr in ('replication_status', 'active_backend_id',
'frozen'):
if getattr(service, attr) != getattr(cluster, attr):
setattr(service, attr, getattr(cluster, attr))
except exception.ClusterNotFound:
# Since the cluster didn't exist, we copy replication fields
# from the service.
cluster = objects.Cluster(
context=context,
name=self.cluster,
binary=self.binary,
disabled=service.disabled,
replication_status=service.replication_status,
active_backend_id=service.active_backend_id,
frozen=service.frozen)
try:
cluster.create()
# Race condition occurred and another service created the
# cluster, so we can continue as it already exists.
except exception.ClusterExists:
pass
def _create_service_ref(self, context, rpc_version=None):
kwargs = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': self.availability_zone,
'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
}
kwargs['cluster_name'] = self.cluster
service_ref = objects.Service(context=context, **kwargs)
service_ref.create()
Service.service_id = service_ref.id
self.origin_service_id = service_ref.id
self._ensure_cluster_exists(context, service_ref)
# If we have updated the service_ref with replication data from
# the cluster it will be saved.
service_ref.save()
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None,
coordination=False, cluster=None, **kwargs):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param cluster: Defaults to None, as only some services will have it
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name,
coordination=coordination,
cluster=cluster, **kwargs)
return service_obj
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
if self.backend_rpcserver:
self.backend_rpcserver.stop()
if self.cluster_rpcserver:
self.cluster_rpcserver.stop()
except Exception:
pass
if self.coordination:
try:
coordination.COORDINATOR.stop()
except Exception:
pass
super(Service, self).stop(graceful=True)
def wait(self):
if self.rpcserver:
self.rpcserver.wait()
if self.backend_rpcserver:
self.backend_rpcserver.wait()
if self.cluster_rpcserver:
self.cluster_rpcserver.wait()
super(Service, self).wait()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.run_periodic_tasks(ctxt, raise_on_error=raise_on_error)
def report_state(self):
"""Update the state of this service in the datastore."""
if not self.manager.is_working():
# NOTE(dulek): If manager reports a problem we're not sending
# heartbeats - to indicate that service is actually down.
LOG.error('Manager for service %(binary)s %(host)s is '
'reporting problems, not sending heartbeat. '
'Service will appear "down".',
{'binary': self.binary,
'host': self.host})
return
ctxt = context.get_admin_context()
try:
try:
service_ref = objects.Service.get_by_id(ctxt,
Service.service_id)
except exception.NotFound:
LOG.debug('The service database object disappeared, '
'recreating it.')
self._create_service_ref(ctxt)
service_ref = objects.Service.get_by_id(ctxt,
Service.service_id)
service_ref.report_count += 1
if self.availability_zone != service_ref.availability_zone:
service_ref.availability_zone = self.availability_zone
service_ref.save()
# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
self.model_disconnected = False
LOG.error('Recovered model server connection!')
except db_exc.DBConnectionError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception('model server went away')
# NOTE(jsbryant) Other DB errors can happen in HA configurations.
# such errors shouldn't kill this thread, so we handle them here.
except db_exc.DBError:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception('DBError encountered: ')
except Exception:
if not getattr(self, 'model_disconnected', False):
self.model_disconnected = True
LOG.exception('Exception encountered: ')
def reset(self):
self.manager.reset()
super(Service, self).reset()
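def _example_run_scheduler_service():
    """Illustrative sketch (not part of the original module): the pattern the
    cinder command-line entry points typically use to build and run a Service.
    Assumes a fully configured CONF; the binary name here is an example only.
    """
    server = Service.create(binary='cinder-scheduler')
    serve(server)
    wait()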
class WSGIService(service.ServiceBase):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader(CONF)
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False)
self.workers = (getattr(CONF, '%s_workers' % name, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)d is invalid, "
"must be greater than 0.") %
{'worker_name': worker_name,
'workers': self.workers})
raise exception.InvalidConfigurationValue(msg)
setup_profiler(name, self.host)
self.server = wsgi.Server(CONF,
name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl)
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def process_launcher():
return service.ProcessLauncher(CONF, restart_method='mutate')
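def _example_run_api_service():
    """Illustrative sketch (not part of the original module): launching the volume
    API with multiple workers via the process launcher. Assumes a fully configured
    CONF and an 'osapi_volume' paste application; names are examples only.
    """
    api_server = WSGIService('osapi_volume')
    launcher = process_launcher()
    launcher.launch_service(api_server, workers=api_server.workers)
    launcher.wait()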
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(CONF, server, workers=workers,
restart_method='mutate')
def wait():
CONF.log_opt_values(LOG, logging.DEBUG)
try:
_launcher.wait()
except KeyboardInterrupt:
_launcher.stop()
rpc.cleanup()
class Launcher(object):
def __init__(self):
self.launch_service = serve
self.wait = wait
def get_launcher():
# Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows
# due to missing support of non-blocking I/O pipes. For this reason, the
# service must be spawned differently on Windows, using the ServiceLauncher
# class instead.
if os.name == 'nt':
return Launcher()
else:
return process_launcher()
class WindowsProcessLauncher(object):
def __init__(self):
self._processutils = os_win_utilsfactory.get_processutils()
self._workers = []
self._worker_job_handles = []
self._signal_handler = service.SignalHandler()
self._add_signal_handlers()
def add_process(self, cmd):
LOG.info("Starting subprocess: %s", cmd)
worker = subprocess.Popen(cmd)
try:
job_handle = self._processutils.kill_process_on_job_close(
worker.pid)
except Exception:
LOG.exception("Could not associate child process "
"with a job, killing it.")
worker.kill()
raise
self._worker_job_handles.append(job_handle)
self._workers.append(worker)
def _add_signal_handlers(self):
self._signal_handler.add_handler('SIGINT', self._terminate)
self._signal_handler.add_handler('SIGTERM', self._terminate)
def _terminate(self, *args):
# We've already assigned win32 job objects to child processes,
# requesting them to stop once all the job handles are closed.
# When this process dies, so will the child processes.
LOG.info("Received request to terminate.")
sys.exit(1)
def wait(self):
pids = [worker.pid for worker in self._workers]
if pids:
self._processutils.wait_for_multiple_processes(pids,
wait_all=True)
# By sleeping here, we allow signal handlers to be executed.
time.sleep(0)
| 39.734904
| 79
| 0.606746
|
8ccf59ddfaba7a7b5e8f295d4b8245a153b57f88
| 790
|
py
|
Python
|
home/kwatters/harry/gestures/armsUp.py
|
rv8flyboy/pyrobotlab
|
4e04fb751614a5cb6044ea15dcfcf885db8be65a
|
[
"Apache-2.0"
] | 63
|
2015-02-03T18:49:43.000Z
|
2022-03-29T03:52:24.000Z
|
home/kwatters/harry/gestures/armsUp.py
|
hirwaHenryChristian/pyrobotlab
|
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
|
[
"Apache-2.0"
] | 16
|
2016-01-26T19:13:29.000Z
|
2018-11-25T21:20:51.000Z
|
home/kwatters/harry/gestures/armsUp.py
|
hirwaHenryChristian/pyrobotlab
|
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
|
[
"Apache-2.0"
] | 151
|
2015-01-03T18:55:54.000Z
|
2022-03-04T07:04:23.000Z
|
def armsUp():
i01.setHeadSpeed(1.0,1.0)
i01.moveHead(180,86)
sleep(1)
i01.setHandSpeed("left",0.90,0.90,0.90,0.90,0.90,1.0)
i01.setHandSpeed("right",0.90,0.90,0.90,0.90,0.90,1.0)
i01.moveHand("left",170,170,170,170,170,33)
i01.moveHand("right",170,170,170,170,170,180)
sleep(3)
i01.setArmSpeed("left",1.0,1.0,1.0,1.0)
i01.setArmSpeed("right",1.0,1.0,1.0,1.0)
i01.setTorsoSpeed(1.0,1.0,1.0)
i01.moveArm("left",90,90,170,20)
i01.moveArm("right",90,90,173,20)
sleep(9)
i01.setHandSpeed("left",1.0,1.0,1.0,1.0,1.0,1.0)
i01.setHandSpeed("right",1.0,1.0,1.0,1.0,1.0,1.0)
i01.moveHead(180,86)
i01.moveArm("left",5,90,170,10)
i01.moveArm("right",5,90,173,10)
i01.moveHand("left",2,2,2,2,2,33)
i01.moveHand("right",2,2,2,2,2,180)
i01.moveTorso(90,90,90)
| 31.6
| 56
| 0.646835
|
dcfb8dc8a64b1963e31eed9f15f47be0eff93bd1
| 418
|
py
|
Python
|
server/main/services/user_service.py
|
wiggitamoo/Angular-Flask-Docker-Skeleton
|
474727334d6c3618e1141b349b2dbe90828b9022
|
[
"MIT"
] | 65
|
2017-12-09T17:58:14.000Z
|
2022-03-28T00:11:09.000Z
|
server/main/services/user_service.py
|
wiggitamoo/Angular-Flask-Docker-Skeleton
|
474727334d6c3618e1141b349b2dbe90828b9022
|
[
"MIT"
] | 14
|
2018-02-10T11:42:59.000Z
|
2022-03-02T02:40:31.000Z
|
server/main/services/user_service.py
|
wiggitamoo/Angular-Flask-Docker-Skeleton
|
474727334d6c3618e1141b349b2dbe90828b9022
|
[
"MIT"
] | 40
|
2018-04-22T20:36:50.000Z
|
2021-12-14T05:17:48.000Z
|
# -*- coding: utf-8 -*-
"""
UserService class - This class holds the methods related to User manipulation.
"""
from server.main.models.user import User
from server.main.services import SQLAlchemyService
class UserService(SQLAlchemyService):
__model__ = User
def __init__(self):
# Creating a parent class ref to access parent class methods.
self.parentClassRef = super(UserService, self)
| 22
| 78
| 0.722488
|
29294e09f27bd34a3b995409999d0fd79abf108c
| 318
|
py
|
Python
|
src/adverts/tasks.py
|
alekseyr/pyjobs
|
9ef6dd8edaba7eb568fcc7d7ee6723ac9c5eb0ff
|
[
"MIT"
] | 1
|
2015-11-21T19:21:23.000Z
|
2015-11-21T19:21:23.000Z
|
src/adverts/tasks.py
|
alekseyr/pyjobs
|
9ef6dd8edaba7eb568fcc7d7ee6723ac9c5eb0ff
|
[
"MIT"
] | 4
|
2015-07-27T08:30:26.000Z
|
2015-07-30T07:58:39.000Z
|
src/adverts/tasks.py
|
pyshop/pyjobs
|
9ef6dd8edaba7eb568fcc7d7ee6723ac9c5eb0ff
|
[
"MIT"
] | null | null | null |
from celery.task import periodic_task  # decorators should now be imported from celery.task
from mailer.engine import send_all
from datetime import timedelta
# this will run every 60 seconds
# send all emails in the mailer queue
@periodic_task(run_every=timedelta(seconds=60))
def email_tasks():
send_all()
| 26.5
| 95
| 0.798742
|
b0a8ab799651fa97f02d471cdd31af72160c7cf1
| 1,716
|
py
|
Python
|
filaments/registry_persistence_detection.py
|
sbilly/fibratus
|
3d6c885a997ef98a244eddd0740b6a88297d1974
|
[
"Apache-2.0"
] | 1
|
2021-04-30T05:10:13.000Z
|
2021-04-30T05:10:13.000Z
|
filaments/registry_persistence_detection.py
|
sbilly/fibratus
|
3d6c885a997ef98a244eddd0740b6a88297d1974
|
[
"Apache-2.0"
] | null | null | null |
filaments/registry_persistence_detection.py
|
sbilly/fibratus
|
3d6c885a997ef98a244eddd0740b6a88297d1974
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015/2016 by Nedim Sabic (RabbitStack)
# All Rights Reserved.
# http://rabbitstack.github.io
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Triggers when a process creates the registry value which
would enable it to execute on system startup.
"""
from filaments.support.alarm import SmtpAlarm
smtp_alarm = SmtpAlarm('smtp.live.com', port=587)
keys = ['Run', 'RunOnce', 'RunServices', 'RunServicesOnce', 'Userinit']
from_addr = 'from@domain.com'
to_addrs = ['to@domain.com']
def on_init():
set_filter('RegSetValue')
def on_next_kevent(kevent):
if kevent.thread:
process_name = kevent.thread.name
key = kevent.params.key
if key in keys:
# compose the message
message = 'The process %s has created a ' \
                      'persistent registry value, ' \
'under %s with content %s' \
% (process_name,
'%s/%s' % (kevent.params.hive, key),
kevent.params.value)
# send the alarm via smtp transport
smtp_alarm.emit('Registry persistence detected',
message, from_addr=from_addr, to_addrs=to_addrs)
| 35.020408
| 76
| 0.652098
|
75b1b4a5f66f7b60520b1ee180a9c7747278d80a
| 380
|
py
|
Python
|
sso/middleware/permission_denied.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 5
|
2016-09-12T12:52:45.000Z
|
2020-03-24T14:43:13.000Z
|
sso/middleware/permission_denied.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 435
|
2016-10-18T12:51:39.000Z
|
2021-06-09T17:22:08.000Z
|
sso/middleware/permission_denied.py
|
uktrade/export-wins-data
|
46caa444812e89abe504bec8c15aa7f7ba1a247e
|
[
"MIT"
] | 2
|
2016-12-06T10:37:21.000Z
|
2017-02-22T17:27:43.000Z
|
from django.utils.deprecation import MiddlewareMixin
from rest_framework import status
from django.conf import settings
class Metadata403(MiddlewareMixin):
def process_response(self, request, response):
if getattr(response, 'status_code', None) == status.HTTP_403_FORBIDDEN:
response['PreferAuthWith'] = settings.SSO_PREFER_AUTH
return response
| 31.666667
| 79
| 0.760526
|
ead8825c7772e8fc261d2f1de80ea3eefc760d6d
| 759
|
py
|
Python
|
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
application/configs.py
|
Jasbeauty/mine_blog
|
22899be4db20a92ad40cf35f84a3123dd0ebeee6
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@127.0.0.1:3306/mine_blog"
SQLALCHEMY_TRACK_MODIFICATIONS = True
JWT_SECRET_KEY = 'jwt_secret'
JWT_AUTH_URL_RULE = '/api/v1/auth'
JWT_EXPIRATION_DELTA = timedelta(seconds=12000)
SYS_UPLOAD_PATH = '/home/laowang/gitwarehouse/mine_blog/application/static/img/'
GITHUB_OAUTH = {
'CLIENT_ID': 'f9fa118d12389497686b',
'CLIENT_SECRET': 'a67149f74ce50c1e95c2d9bdeba7bbd579eb8d45',
'AUTHORIZE_PATH': 'https://github.com/login/oauth/authorize',
'ACCESS_TOKEN_PATH': 'https://github.com/login/oauth/access_token',
'USER_MESSAGE_PATH': 'https://api.github.com/user',
}
TENCENT_OAUTH = {
'secret_id': '',
'secret_key': '',
'region': '',
'bucket': ''
}
| 36.142857
| 80
| 0.73386
|
29116728e827a0e3ddc01db54352fb9c9ec71999
| 343
|
py
|
Python
|
site/thicc/apps/social_auth_filter/templatetags/social_auth_filter.py
|
aldenjenkins/ThiccGaming
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
[
"BSD-3-Clause"
] | 1
|
2016-03-11T18:48:36.000Z
|
2016-03-11T18:48:36.000Z
|
social_auth_filter/templatetags/social_auth_filter.py
|
Azelphur-Servers/game.azelphur.com
|
d2410cb7b044c980d56151b9ce19442433fd8ac8
|
[
"BSD-3-Clause"
] | 21
|
2016-01-25T22:35:28.000Z
|
2022-03-11T23:14:32.000Z
|
site/thicc/apps/social_auth_filter/templatetags/social_auth_filter.py
|
aldenjenkins/ThiccGaming
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
[
"BSD-3-Clause"
] | 5
|
2016-02-12T20:13:14.000Z
|
2018-01-31T08:26:16.000Z
|
from django import template
register = template.Library()
DISPLAY_NAMES = {
'reddit': 'Reddit',
'github': 'Github',
'google-oauth2': 'Google',
'facebook': 'Facebook',
'twitter': 'Twitter',
}
@register.filter(name='social_display_name')
def social_display_name(value):
return DISPLAY_NAMES.get(value.lower(), value)
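# Illustrative sketch (not part of the original module): the filter maps backend
# identifiers to display names and falls back to the raw value for unknown backends.
# The backend strings below are examples only.
def _example_social_display_name():
    assert social_display_name("google-oauth2") == "Google"
    assert social_display_name("REDDIT") == "Reddit"
    assert social_display_name("gitlab") == "gitlab"  # unknown backends pass through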
| 21.4375
| 50
| 0.6793
|
b26c57a7f70feab418696dcbae7d040a4b15128f
| 82,058
|
py
|
Python
|
sympy/integrals/rubi/tests/test_utility_function.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | 1
|
2018-11-20T11:40:30.000Z
|
2018-11-20T11:40:30.000Z
|
sympy/integrals/rubi/tests/test_utility_function.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | 14
|
2018-02-08T10:11:03.000Z
|
2019-04-16T10:32:46.000Z
|
sympy/integrals/rubi/tests/test_utility_function.py
|
gum3ng/sympy
|
e9414fafa976b26aa0b701a0217ab0f3b561989f
|
[
"BSD-3-Clause"
] | 1
|
2020-09-09T20:41:34.000Z
|
2020-09-09T20:41:34.000Z
|
import sys
from sympy.external import import_module
matchpy = import_module("matchpy")
if not matchpy:
#bin/test will not execute any tests now
disabled = True
if sys.version_info[:2] < (3, 6):
disabled = True
from sympy.integrals.rubi.utility_function import (Set, With, Module,
Scan, MapAnd, FalseQ, ZeroQ, NegativeQ, NonzeroQ, FreeQ, List, Log,
PositiveQ, PositiveIntegerQ, NegativeIntegerQ, IntegerQ, IntegersQ,
ComplexNumberQ, RealNumericQ, PositiveOrZeroQ,
NegativeOrZeroQ, FractionOrNegativeQ, NegQ, Equal, Unequal, IntPart,
FracPart, RationalQ, ProductQ, SumQ, NonsumQ, First, Rest,
SqrtNumberQ, LinearQ, Sqrt, ArcCosh, Coefficient,
Denominator, Hypergeometric2F1, Not, Simplify, FractionalPart, IntegerPart,
AppellF1, PolynomialQuotient, ArcTan, ArcTanh, ArcSin, ArcSinh, ArcCos,
ArcCsc, ArcCsch, Sinh, Coth, LessEqual, Less, Greater,
GreaterEqual, FractionQ, IntLinearcQ, Expand, IndependentQ, PowerQ,
IntegerPowerQ, PositiveIntegerPowerQ, FractionalPowerQ, AtomQ, ExpQ, LogQ,
Head, MemberQ, TrigQ, SinQ, CosQ, TanQ, CotQ, SecQ, CscQ, HyperbolicQ,
SinhQ, CoshQ, TanhQ, CothQ, SechQ, CschQ, InverseTrigQ, SinCosQ, SinhCoshQ,
LeafCount, Numerator, NumberQ, NumericQ, Length, ListQ, Im, Re,
InverseHyperbolicQ, InverseFunctionQ, EqQ, FractionalPowerFreeQ,
ComplexFreeQ, PolynomialQ, FactorSquareFree, PowerOfLinearQ, Exponent,
QuadraticQ, LinearPairQ, BinomialParts, TrinomialParts, PolyQ, EvenQ, OddQ,
PerfectSquareQ, NiceSqrtAuxQ, NiceSqrtQ, Together, PosAux, PosQ,
CoefficientList, ReplaceAll, ExpandLinearProduct, GCD, ContentFactor,
NumericFactor, NonnumericFactors, MakeAssocList, GensymSubst, KernelSubst,
ExpandExpression, Apart, SmartApart, MatchQ, PolynomialQuotientRemainder,
FreeFactors, NonfreeFactors, RemoveContentAux, RemoveContent, FreeTerms,
NonfreeTerms, ExpandAlgebraicFunction, CollectReciprocals, ExpandCleanup,
AlgebraicFunctionQ, Coeff, LeadTerm, RemainingTerms, LeadFactor,
RemainingFactors, LeadBase, LeadDegree, Numer, Denom, hypergeom, Expon,
MergeMonomials, PolynomialDivide, BinomialQ, TrinomialQ,
GeneralizedBinomialQ, GeneralizedTrinomialQ, FactorSquareFreeList,
PerfectPowerTest, SquareFreeFactorTest, RationalFunctionQ,
RationalFunctionFactors, NonrationalFunctionFactors, Reverse,
RationalFunctionExponents, RationalFunctionExpand, ExpandIntegrand, SimplerQ,
SimplerSqrtQ, SumSimplerQ, BinomialDegree, TrinomialDegree,
CancelCommonFactors, SimplerIntegrandQ, GeneralizedBinomialDegree,
GeneralizedBinomialParts, GeneralizedTrinomialDegree,
GeneralizedTrinomialParts, MonomialQ, MonomialSumQ, MinimumMonomialExponent,
MonomialExponent, LinearMatchQ, PowerOfLinearMatchQ, QuadraticMatchQ,
CubicMatchQ, BinomialMatchQ, TrinomialMatchQ, GeneralizedBinomialMatchQ,
GeneralizedTrinomialMatchQ, QuotientOfLinearsMatchQ, PolynomialTermQ,
PolynomialTerms, NonpolynomialTerms, PseudoBinomialParts,
NormalizePseudoBinomial, PseudoBinomialPairQ, PseudoBinomialQ,
PolynomialGCD, PolyGCD, AlgebraicFunctionFactors, NonalgebraicFunctionFactors,
QuotientOfLinearsP, QuotientOfLinearsParts, QuotientOfLinearsQ, Flatten,
Sort, AbsurdNumberQ, AbsurdNumberFactors, NonabsurdNumberFactors,
SumSimplerAuxQ, Prepend, Drop, CombineExponents, FactorInteger,
FactorAbsurdNumber, SubstForInverseFunction, SubstForFractionalPower,
SubstForFractionalPowerOfQuotientOfLinears, FractionalPowerOfQuotientOfLinears,
SubstForFractionalPowerQ, SubstForFractionalPowerAuxQ, FractionalPowerOfSquareQ,
FractionalPowerSubexpressionQ, Apply, FactorNumericGcd, MergeableFactorQ,
MergeFactor, MergeFactors, TrigSimplifyQ, TrigSimplify, TrigSimplifyRecur,
Order, FactorOrder, Smallest, OrderedQ, MinimumDegree, PositiveFactors, Sign,
NonpositiveFactors, PolynomialInAuxQ, PolynomialInQ, ExponentInAux, ExponentIn,
PolynomialInSubstAux, PolynomialInSubst, Distrib, DistributeDegree,
FunctionOfPower, DivideDegreesOfFactors, MonomialFactor, FullSimplify,
FunctionOfLinearSubst, FunctionOfLinear, NormalizeIntegrand,
NormalizeIntegrandAux, NormalizeIntegrandFactor, NormalizeIntegrandFactorBase,
NormalizeTogether, NormalizeLeadTermSigns, AbsorbMinusSign,
NormalizeSumFactors, SignOfFactor, NormalizePowerOfLinear,
SimplifyIntegrand, SimplifyTerm, TogetherSimplify, SmartSimplify,
SubstForExpn, ExpandToSum, UnifySum, UnifyTerms, UnifyTerm, CalculusQ,
FunctionOfInverseLinear, PureFunctionOfSinhQ, PureFunctionOfTanhQ,
PureFunctionOfCoshQ, IntegerQuotientQ, OddQuotientQ, EvenQuotientQ,
FindTrigFactor, FunctionOfSinhQ, FunctionOfCoshQ, OddHyperbolicPowerQ,
FunctionOfTanhQ, FunctionOfTanhWeight, FunctionOfHyperbolicQ, SmartNumerator,
SmartDenominator, ActivateTrig, ExpandTrig, TrigExpand,
SubstForTrig, SubstForHyperbolic, InertTrigFreeQ, LCM,
SubstForFractionalPowerOfLinear, FractionalPowerOfLinear,
InverseFunctionOfLinear, InertTrigQ, InertReciprocalQ, DeactivateTrig,
FixInertTrigFunction, DeactivateTrigAux, PowerOfInertTrigSumQ,
PiecewiseLinearQ, KnownTrigIntegrandQ, KnownSineIntegrandQ,
KnownTangentIntegrandQ, KnownCotangentIntegrandQ, KnownSecantIntegrandQ,
TryPureTanSubst, TryTanhSubst, TryPureTanhSubst, AbsurdNumberGCD,
AbsurdNumberGCDList, ExpandTrigExpand, ExpandTrigReduce, ExpandTrigReduceAux,
NormalizeTrig, TrigToExp, ExpandTrigToExp, TrigReduce, FunctionOfTrig,
AlgebraicTrigFunctionQ, FunctionOfHyperbolic, FunctionOfQ, FunctionOfExpnQ,
PureFunctionOfSinQ, PureFunctionOfCosQ, PureFunctionOfTanQ, PureFunctionOfCotQ,
FunctionOfCosQ, FunctionOfSinQ, OddTrigPowerQ, FunctionOfTanQ,
FunctionOfTanWeight, FunctionOfTrigQ, FunctionOfDensePolynomialsQ,
FunctionOfLog, PowerVariableExpn, PowerVariableDegree, PowerVariableSubst,
EulerIntegrandQ, FunctionOfSquareRootOfQuadratic, SquareRootOfQuadraticSubst,
Divides, EasyDQ, ProductOfLinearPowersQ, Rt, NthRoot, AtomBaseQ, SumBaseQ,
NegSumBaseQ, AllNegTermQ, SomeNegTermQ, TrigSquareQ, RtAux, TrigSquare,
IntSum, IntTerm, Map2, ConstantFactor, SameQ, ReplacePart, CommonFactors,
MostMainFactorPosition, FunctionOfExponentialQ, FunctionOfExponential,
FunctionOfExponentialFunction, FunctionOfExponentialFunctionAux,
FunctionOfExponentialTest, FunctionOfExponentialTestAux, stdev, rubi_test,
If, IntQuadraticQ, IntBinomialQ, RectifyTangent, RectifyCotangent,
Inequality, Condition, Simp, SimpHelp, SplitProduct, SplitSum, SubstFor,
SubstForAux, FresnelS, FresnelC, Erfc, Erfi, Gamma, FunctionOfTrigOfLinearQ,
ElementaryFunctionQ, Complex, UnsameQ, _SimpFixFactor,
DerivativeDivides, SimpFixFactor, _FixSimplify, FixSimplify,
_SimplifyAntiderivativeSum, SimplifyAntiderivativeSum, PureFunctionOfCothQ,
_SimplifyAntiderivative, SimplifyAntiderivative, _TrigSimplifyAux,
TrigSimplifyAux, Cancel, Part, PolyLog, D, Dist, IntegralFreeQ, Sum_doit,
rubi_exp, rubi_log, PolynomialRemainder, CoprimeQ, Distribute, ProductLog,
Floor, PolyGamma, process_trig, replace_pow_exp, ExponentList)
# TODO - Add tests for: Int, NFreeQ, PureComplexNumberQ, EllipticPi, EllipticE,
# EllipticF, ArcCot, ArcCoth, Tanh, Cosh, Sech, ArcSec, ArcSech, Subst,
# SqrtNumberSumQ, Sin, Cos, Tan, Cot, Sec, Csc, Csch, TrigHyperbolicFreeQ,
# InverseFunctionFreeQ, RealQ,
from sympy.core.add import Add
from sympy.core.expr import unchanged
from sympy.core.numbers import (E, I, oo, pi, zoo)
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import (symbols, Symbol, Wild)
from sympy.functions.elementary.exponential import exp, log as sym_log
from sympy.functions.elementary.hyperbolic import acosh, asinh, atanh, acsch, cosh, sinh, tanh, coth, sech, csch, acoth
from sympy.functions.elementary.miscellaneous import Min, sqrt
from sympy.functions.elementary.trigonometric import (cos, cot, csc, sec, sin, tan, atan, acsc, asin, acot, acos, asec, atan2)
from sympy.functions.special.error_functions import (Chi, Ci, Ei, Shi, Si, expint, li)
from sympy.functions.special.gamma_functions import (gamma, loggamma, polygamma)
from sympy.functions.special.hyper import hyper
from sympy.functions.special.zeta_functions import (polylog, zeta)
from sympy.integrals.integrals import Integral
from sympy.simplify.simplify import (nsimplify, simplify)
A, B, a, b, c, d, e, f, g, h, y, z, m, n, p, q, u, v, w, F = symbols('A B a b c d e f g h y z m n p q u v w F', real=True, imaginary=False)
x = Symbol('x')
def test_ZeroQ():
e = b*(n*p + n + 1)
d = a
assert ZeroQ(a*e - b*d*(n*(p + S(1)) + S(1)))
assert ZeroQ(S(0))
assert not ZeroQ(S(10))
assert not ZeroQ(S(-2))
assert ZeroQ(0, 2-2)
assert ZeroQ([S(2), (4), S(0), S(8)]) == [False, False, True, False]
assert ZeroQ([S(2), S(4), S(8)]) == [False, False, False]
def test_NonzeroQ():
assert NonzeroQ(S(1)) == True
def test_FreeQ():
l = [a*b, x, a + b]
assert FreeQ(l, x) == False
l = [a*b, a + b]
assert FreeQ(l, x) == True
def test_List():
assert List(a, b, c) == [a, b, c]
def test_Log():
assert Log(a) == rubi_log(a)
def test_PositiveIntegerQ():
assert PositiveIntegerQ(S(1))
assert not PositiveIntegerQ(S(-3))
assert not PositiveIntegerQ(S(0))
def test_NegativeIntegerQ():
assert not NegativeIntegerQ(S(1))
assert NegativeIntegerQ(S(-3))
assert not NegativeIntegerQ(S(0))
def test_PositiveQ():
assert PositiveQ(S(1))
assert not PositiveQ(S(-3))
assert not PositiveQ(S(0))
assert not PositiveQ(zoo)
assert not PositiveQ(I)
assert PositiveQ(b/(b*(b*c/(-a*d + b*c)) - a*(b*d/(-a*d + b*c))))
def test_IntegerQ():
assert IntegerQ(S(1))
assert not IntegerQ(S(-1.9))
assert not IntegerQ(S(0.0))
assert IntegerQ(S(-1))
def test_IntegersQ():
assert IntegersQ([S(1), S(0)])
assert not IntegersQ([S(-1.9), S(1)])
assert not IntegersQ([S(0.0), S(0)])
assert IntegersQ([S(-1), S(0), S(2)])
def test_FracPart():
assert FracPart(S(10)) == 0
assert FracPart(S(10)+0.5) == 10.5
def test_IntPart():
assert IntPart(m*n) == 0
assert IntPart(S(10)) == 10
assert IntPart(1 + m) == 1
def test_NegQ():
assert NegQ(-S(3))
assert not NegQ(S(0))
assert not NegQ(S(0))
def test_RationalQ():
assert RationalQ(S(5)/6)
assert RationalQ(S(5)/6, S(4)/5)
assert not RationalQ(Sqrt(1.6))
assert not RationalQ(Sqrt(1.6), S(5)/6)
assert not RationalQ(rubi_log(2))
def test_ArcCosh():
assert ArcCosh(x) == acosh(x)
def test_LinearQ():
assert not LinearQ(a, x)
assert LinearQ(3*x + y**2, x)
assert not LinearQ(3*x + y**2, y)
assert not LinearQ(S(3), x)
def test_Sqrt():
assert Sqrt(x) == sqrt(x)
assert Sqrt(25) == 5
def test_Util_Coefficient():
from sympy.integrals.rubi.utility_function import Util_Coefficient
assert unchanged(Util_Coefficient, a + b*x + c*x**3, x, a)
assert Util_Coefficient(a + b*x + c*x**3, x, 4).doit() == 0
def test_Coefficient():
assert Coefficient(7 + 2*x + 4*x**3, x, 1) == 2
assert Coefficient(a + b*x + c*x**3, x, 0) == a
assert Coefficient(a + b*x + c*x**3, x, 4) == 0
assert Coefficient(b*x + c*x**3, x, 3) == c
assert Coefficient(x, x, -1) == 0
def test_Denominator():
assert Denominator(-S(1)/S(2) + I/3) == 6
assert Denominator((-a/b)**3) == (b)**(3)
assert Denominator(S(3)/2) == 2
assert Denominator(x/y) == y
assert Denominator(S(4)/5) == 5
def test_Hypergeometric2F1():
assert Hypergeometric2F1(1, 2, 3, x) == hyper((1, 2), (3,), x)
def test_ArcTan():
assert ArcTan(x) == atan(x)
assert ArcTan(x, y) == atan2(x, y)
def test_Not():
a = 10
assert Not(a == 2)
def test_FractionalPart():
assert FractionalPart(S(3.0)) == 0.0
def test_IntegerPart():
assert IntegerPart(3.6) == 3
assert IntegerPart(-3.6) == -4
def test_AppellF1():
assert AppellF1(1,0,0.5,1,0.5,0.25).evalf() == 1.154700538379251529018298
assert unchanged(AppellF1, a, b, c, d, e, f)
def test_Simplify():
assert Simplify(sin(x)**2 + cos(x)**2) == 1
assert Simplify((x**3 + x**2 - x - 1)/(x**2 + 2*x + 1)) == x - 1
def test_ArcTanh():
assert ArcTanh(a) == atanh(a)
def test_ArcSin():
assert ArcSin(a) == asin(a)
def test_ArcSinh():
assert ArcSinh(a) == asinh(a)
def test_ArcCos():
assert ArcCos(a) == acos(a)
def test_ArcCsc():
assert ArcCsc(a) == acsc(a)
def test_ArcCsch():
assert ArcCsch(a) == acsch(a)
def test_Equal():
assert Equal(a, a)
assert not Equal(a, b)
def test_LessEqual():
assert LessEqual(1, 2, 3)
assert LessEqual(1, 1)
assert not LessEqual(3, 2, 1)
def test_With():
assert With(Set(x, 3), x + y) == 3 + y
assert With(List(Set(x, 3), Set(y, c)), x + y) == 3 + c
def test_Module():
# Same as With
assert Module(Set(x, 3), x + y) == 3 + y
assert Module(List(Set(x, 3), Set(y, c)), x + y) == 3 + c
def test_Less():
assert Less(1, 2, 3)
assert not Less(1, 1, 3)
def test_Greater():
assert Greater(3, 2, 1)
assert not Greater(3, 2, 2)
def test_GreaterEqual():
assert GreaterEqual(3, 2, 1)
assert GreaterEqual(3, 2, 2)
assert not GreaterEqual(2, 3)
def test_Unequal():
assert Unequal(1, 2)
assert not Unequal(1, 1)
def test_FractionQ():
assert not FractionQ(S('3'))
assert FractionQ(S('3')/S('2'))
def test_Expand():
assert Expand((1 + x)**10) == x**10 + 10*x**9 + 45*x**8 + 120*x**7 + 210*x**6 + 252*x**5 + 210*x**4 + 120*x**3 + 45*x**2 + 10*x + 1
def test_Scan():
assert list(Scan(sin, [a, b])) == [sin(a), sin(b)]
def test_MapAnd():
assert MapAnd(PositiveQ, [S(1), S(2), S(3), S(0)]) == False
assert MapAnd(PositiveQ, [S(1), S(2), S(3)]) == True
def test_FalseQ():
assert FalseQ(True) == False
assert FalseQ(False) == True
def test_ComplexNumberQ():
assert ComplexNumberQ(1 + I*2, I) == True
assert ComplexNumberQ(a + b, I) == False
def test_Re():
assert Re(1 + I) == 1
def test_Im():
assert Im(1 + 2*I) == 2
assert Im(a*I) == a
def test_PositiveOrZeroQ():
assert PositiveOrZeroQ(S(0)) == True
assert PositiveOrZeroQ(S(1)) == True
assert PositiveOrZeroQ(-S(1)) == False
def test_RealNumericQ():
assert RealNumericQ(S(1)) == True
assert RealNumericQ(-S(1)) == True
def test_NegativeOrZeroQ():
assert NegativeOrZeroQ(S(0)) == True
assert NegativeOrZeroQ(-S(1)) == True
assert NegativeOrZeroQ(S(1)) == False
def test_FractionOrNegativeQ():
assert FractionOrNegativeQ(S(1)/2) == True
assert FractionOrNegativeQ(-S(1)) == True
assert FractionOrNegativeQ(-S(1)/2) == True
assert FractionOrNegativeQ(S(1)) == False
def test_NegativeQ():
assert NegativeQ(-S(1)) == True
assert NegativeQ(S(1)) == False
assert NegativeQ(oo) == False
def test_ProductQ():
assert ProductQ(a*b) == True
assert ProductQ(a + b) == False
def test_SumQ():
assert SumQ(a*b) == False
assert SumQ(a + b) == True
def test_NonsumQ():
assert NonsumQ(a*b) == True
assert NonsumQ(a + b) == False
def test_SqrtNumberQ():
assert SqrtNumberQ(sqrt(2)) == True
def test_IntLinearcQ():
assert IntLinearcQ(1, 2, 3, 4, 5, 6, x) == True
assert IntLinearcQ(S(1)/100, S(2)/100, S(3)/100, S(4)/100, S(5)/100, S(6)/100, x) == False
def test_IndependentQ():
assert IndependentQ(a + b*x, x) == False
assert IndependentQ(a + b, x) == True
def test_PowerQ():
assert PowerQ(a**b) == True
assert PowerQ(a + b) == False
def test_IntegerPowerQ():
assert IntegerPowerQ(a**2) == True
assert IntegerPowerQ(a**0.5) == False
def test_PositiveIntegerPowerQ():
assert PositiveIntegerPowerQ(a**3) == True
assert PositiveIntegerPowerQ(a**(-2)) == False
def test_FractionalPowerQ():
assert FractionalPowerQ(a**(S(2)/S(3)))
assert FractionalPowerQ(a**sqrt(2)) == False
def test_AtomQ():
assert AtomQ(x)
assert not AtomQ(x+1)
assert not AtomQ([a, b])
def test_ExpQ():
assert ExpQ(E**2)
assert not ExpQ(2**E)
def test_LogQ():
assert LogQ(rubi_log(x))
assert not LogQ(sin(x) + rubi_log(x))
def test_Head():
assert Head(sin(x)) == sin
assert Head(rubi_log(x**3 + 3)) in (sym_log, rubi_log)
def test_MemberQ():
assert MemberQ([a, b, c], b)
assert MemberQ([sin, cos, log, tan], Head(sin(x)))
assert MemberQ([[sin, cos], [tan, cot]], [sin, cos])
assert not MemberQ([[sin, cos], [tan, cot]], [sin, tan])
def test_TrigQ():
assert TrigQ(sin(x))
assert TrigQ(tan(x**2 + 2))
assert not TrigQ(sin(x) + tan(x))
def test_SinQ():
assert SinQ(sin(x))
assert not SinQ(tan(x))
def test_CosQ():
assert CosQ(cos(x))
assert not CosQ(csc(x))
def test_TanQ():
assert TanQ(tan(x))
assert not TanQ(cot(x))
def test_CotQ():
assert not CotQ(tan(x))
assert CotQ(cot(x))
def test_SecQ():
assert SecQ(sec(x))
assert not SecQ(csc(x))
def test_CscQ():
assert not CscQ(sec(x))
assert CscQ(csc(x))
def test_HyperbolicQ():
assert HyperbolicQ(sinh(x))
assert HyperbolicQ(cosh(x))
assert HyperbolicQ(tanh(x))
assert not HyperbolicQ(sinh(x) + cosh(x) + tanh(x))
def test_SinhQ():
assert SinhQ(sinh(x))
assert not SinhQ(cosh(x))
def test_CoshQ():
assert not CoshQ(sinh(x))
assert CoshQ(cosh(x))
def test_TanhQ():
assert TanhQ(tanh(x))
assert not TanhQ(coth(x))
def test_CothQ():
assert not CothQ(tanh(x))
assert CothQ(coth(x))
def test_SechQ():
assert SechQ(sech(x))
assert not SechQ(csch(x))
def test_CschQ():
assert not CschQ(sech(x))
assert CschQ(csch(x))
def test_InverseTrigQ():
assert InverseTrigQ(acot(x))
assert InverseTrigQ(asec(x))
assert not InverseTrigQ(acsc(x) + asec(x))
def test_SinCosQ():
assert SinCosQ(sin(x))
assert SinCosQ(cos(x))
assert SinCosQ(sec(x))
assert not SinCosQ(acsc(x))
def test_SinhCoshQ():
assert not SinhCoshQ(sin(x))
assert SinhCoshQ(cosh(x))
assert SinhCoshQ(sech(x))
assert SinhCoshQ(csch(x))
def test_LeafCount():
assert LeafCount(1 + a + x**2) == 6
def test_Numerator():
assert Numerator(-S(1)/S(2) + I/3) == -3 + 2*I
assert Numerator((-a/b)**3) == (-a)**(3)
assert Numerator(S(3)/2) == 3
assert Numerator(x/y) == x
def test_Length():
assert Length(a + b) == 2
assert Length(sin(a)*cos(a)) == 2
def test_ListQ():
assert ListQ([1, 2])
assert not ListQ(a)
def test_InverseHyperbolicQ():
assert InverseHyperbolicQ(acosh(a))
def test_InverseFunctionQ():
assert InverseFunctionQ(rubi_log(a))
assert InverseFunctionQ(acos(a))
assert not InverseFunctionQ(a)
assert InverseFunctionQ(acosh(a))
assert InverseFunctionQ(polylog(a, b))
def test_EqQ():
assert EqQ(a, a)
assert not EqQ(a, b)
def test_FactorSquareFree():
assert FactorSquareFree(x**5 - x**3 - x**2 + 1) == (x**3 + 2*x**2 + 2*x + 1)*(x - 1)**2
def test_FactorSquareFreeList():
assert FactorSquareFreeList(x**5-x**3-x**2 + 1) == [[1, 1], [x**3 + 2*x**2 + 2*x + 1, 1], [x - 1, 2]]
assert FactorSquareFreeList(x**4 - 2*x**2 + 1) == [[1, 1], [x**2 - 1, 2]]
def test_PerfectPowerTest():
assert not PerfectPowerTest(sqrt(x), x)
assert not PerfectPowerTest(x**5-x**3-x**2 + 1, x)
assert PerfectPowerTest(x**4 - 2*x**2 + 1, x) == (x**2 - 1)**2
def test_SquareFreeFactorTest():
assert not SquareFreeFactorTest(sqrt(x), x)
assert SquareFreeFactorTest(x**5 - x**3 - x**2 + 1, x) == (x**3 + 2*x**2 + 2*x + 1)*(x - 1)**2
def test_Rest():
assert Rest([2, 3, 5, 7]) == [3, 5, 7]
assert Rest(a + b + c) == b + c
assert Rest(a*b*c) == b*c
assert Rest(1/b) == -1
def test_First():
assert First([2, 3, 5, 7]) == 2
assert First(y**S(2)) == y
assert First(a + b + c) == a
assert First(a*b*c) == a
def test_ComplexFreeQ():
assert ComplexFreeQ(a)
assert not ComplexFreeQ(a + 2*I)
def test_FractionalPowerFreeQ():
assert not FractionalPowerFreeQ(x**(S(2)/3))
assert FractionalPowerFreeQ(x)
def test_Exponent():
assert Min(ExponentList(x**2 + x + 1 + 5, x)) == 0
assert ExponentList(x**2 + x + 1 + 5, x) == [0, 1, 2]
assert ExponentList(x**2 + x + 1, x) == [0, 1, 2]
assert ExponentList(x**2 + 2*x + 1, x) == [0, 1, 2]
assert Exponent(x**3 + x + 1, x) == 3
assert Exponent(x**2 + 2*x + 1, x) == 2
assert ExponentList(x**3, x) == [3]
assert Exponent(S(1), x) == 0
assert Exponent(x**(-3), x) == 0
def test_Expon():
assert Expon(x**2+2*x+1, x) == 2
def test_QuadraticQ():
assert not QuadraticQ([x**2+x+1, 5*x**2], x)
assert QuadraticQ([x**2+x+1, 5*x**2+3*x+6], x)
assert not QuadraticQ(x**2+1+x**3, x)
assert QuadraticQ(x**2+1+x, x)
assert not QuadraticQ(x**2, x)
def test_BinomialQ():
assert BinomialQ(x**9, x)
assert not BinomialQ((1 + x)**3, x)
def test_BinomialParts():
assert BinomialParts(2 + x*(9*x), x) == [2, 9, 2]
assert BinomialParts(x**9, x) == [0, 1, 9]
assert BinomialParts(2*x**3, x) == [0, 2, 3]
assert BinomialParts(2 + x, x) == [2, 1, 1]
def test_BinomialDegree():
assert BinomialDegree(b + 2*c*x**n, x) == n
assert BinomialDegree(2 + x*(9*x), x) == 2
assert BinomialDegree(x**9, x) == 9
def test_PolynomialQ():
assert not PolynomialQ(x*(-1 + x**2), (1 + x)**(S(1)/2))
assert not PolynomialQ((16*x + 1)/((x + 5)**2*(x**2 + x + 1)), 2*x)
C = Symbol('C')
assert not PolynomialQ(A + b*x + c*x**2, x**2)
assert PolynomialQ(A + B*x + C*x**2)
assert PolynomialQ(A + B*x**4 + C*x**2, x**2)
assert PolynomialQ(x**3, x)
assert not PolynomialQ(sqrt(x), x)
def test_PolyQ():
assert PolyQ(-2*a*d**3*e**2 + x**6*(a*e**5 - b*d*e**4 + c*d**2*e**3)\
+ x**4*(-2*a*d*e**4 + 2*b*d**2*e**3 - 2*c*d**3*e**2) + x**2*(2*a*d**2*e**3 - 2*b*d**3*e**2), x)
assert not PolyQ(1/sqrt(a + b*x**2 - c*x**4), x**2)
assert PolyQ(x, x, 1)
assert PolyQ(x**2, x, 2)
assert not PolyQ(x**3, x, 2)
def test_EvenQ():
assert EvenQ(S(2))
assert not EvenQ(S(1))
def test_OddQ():
assert OddQ(S(1))
assert not OddQ(S(2))
def test_PerfectSquareQ():
assert PerfectSquareQ(S(4))
assert PerfectSquareQ(a**S(2)*b**S(4))
assert not PerfectSquareQ(S(1)/3)
def test_NiceSqrtQ():
assert NiceSqrtQ(S(1)/3)
assert not NiceSqrtQ(-S(1))
assert NiceSqrtQ(pi**2)
assert NiceSqrtQ(pi**2*sin(4)**4)
assert not NiceSqrtQ(pi**2*sin(4)**3)
def test_Together():
assert Together(1/a + b/2) == (a*b + 2)/(2*a)
def test_PosQ():
#assert not PosQ((b*e - c*d)/(c*e))
assert not PosQ(S(0))
assert PosQ(S(1))
assert PosQ(pi)
assert PosQ(pi**3)
assert PosQ((-pi)**4)
assert PosQ(sin(1)**2*pi**4)
def test_NumericQ():
assert NumericQ(sin(cos(2)))
def test_NumberQ():
assert NumberQ(pi)
def test_CoefficientList():
assert CoefficientList(1 + a*x, x) == [1, a]
assert CoefficientList(1 + a*x**3, x) == [1, 0, 0, a]
assert CoefficientList(sqrt(x), x) == []
def test_ReplaceAll():
assert ReplaceAll(x, {x: a}) == a
assert ReplaceAll(a*x, {x: a + b}) == a*(a + b)
assert ReplaceAll(a*x, {a: b, x: a + b}) == b*(a + b)
def test_ExpandLinearProduct():
assert ExpandLinearProduct(rubi_log(x), x**2, a, b, x) == a**2*rubi_log(x)/b**2 - 2*a*(a + b*x)*rubi_log(x)/b**2 + (a + b*x)**2*rubi_log(x)/b**2
assert ExpandLinearProduct((a + b*x)**n, x**3, a, b, x) == -a**3*(a + b*x)**n/b**3 + 3*a**2*(a + b*x)**(n + 1)/b**3 - 3*a*(a + b*x)**(n + 2)/b**3 + (a + b*x)**(n + 3)/b**3
def test_PolynomialDivide():
assert PolynomialDivide((a*c - b*c*x)**2, (a + b*x)**2, x) == -4*a*b*c**2*x/(a + b*x)**2 + c**2
assert PolynomialDivide(x + x**2, x, x) == x + 1
assert PolynomialDivide((1 + x)**3, (1 + x)**2, x) == x + 1
assert PolynomialDivide((a + b*x)**3, x**3, x) == a*(a**2 + 3*a*b*x + 3*b**2*x**2)/x**3 + b**3
assert PolynomialDivide(x**3*(a + b*x), S(1), x) == b*x**4 + a*x**3
assert PolynomialDivide(x**6, (a + b*x)**2, x) == -a**5*(5*a + 6*b*x)/(b**6*(a + b*x)**2) + 5*a**4/b**6 - 4*a**3*x/b**5 + 3*a**2*x**2/b**4 - 2*a*x**3/b**3 + x**4/b**2
def test_MatchQ():
a_ = Wild('a', exclude=[x])
b_ = Wild('b', exclude=[x])
c_ = Wild('c', exclude=[x])
assert MatchQ(a*b + c, a_*b_ + c_, a_, b_, c_) == (a, b, c)
def test_PolynomialQuotientRemainder():
assert PolynomialQuotientRemainder(x**2, x+a, x) == [-a + x, a**2]
def test_FreeFactors():
assert FreeFactors(a, x) == a
assert FreeFactors(x + a, x) == 1
assert FreeFactors(a*b*x, x) == a*b
def test_NonfreeFactors():
assert NonfreeFactors(a, x) == 1
assert NonfreeFactors(x + a, x) == x + a
assert NonfreeFactors(a*b*x, x) == x
def test_FreeTerms():
assert FreeTerms(a, x) == a
assert FreeTerms(x*a, x) == 0
assert FreeTerms(a*x + b, x) == b
def test_NonfreeTerms():
assert NonfreeTerms(a, x) == 0
assert NonfreeTerms(a*x, x) == a*x
assert NonfreeTerms(a*x + b, x) == a*x
def test_RemoveContent():
assert RemoveContent(a + b*x, x) == a + b*x
def test_ExpandAlgebraicFunction():
assert ExpandAlgebraicFunction((a + b)*x, x) == a*x + b*x
assert ExpandAlgebraicFunction((a + b)**2*x, x)== a**2*x + 2*a*b*x + b**2*x
assert ExpandAlgebraicFunction((a + b)**2*x**2, x) == a**2*x**2 + 2*a*b*x**2 + b**2*x**2
def test_CollectReciprocals():
assert CollectReciprocals(-1/(1 + 1*x) - 1/(1 - 1*x), x) == -2/(-x**2 + 1)
assert CollectReciprocals(1/(1 + 1*x) - 1/(1 - 1*x), x) == -2*x/(-x**2 + 1)
def test_ExpandCleanup():
assert ExpandCleanup(a + b, x) == a*(1 + b/a)
assert ExpandCleanup(b**2/(a**2*(a + b*x)**2) + 1/(a**2*x**2) + 2*b**2/(a**3*(a + b*x)) - 2*b/(a**3*x), x) == b**2/(a**2*(a + b*x)**2) + 1/(a**2*x**2) + 2*b**2/(a**3*(a + b*x)) - 2*b/(a**3*x)
def test_AlgebraicFunctionQ():
assert not AlgebraicFunctionQ(1/(a + c*x**(2*n)), x)
assert AlgebraicFunctionQ(a, x) == True
assert AlgebraicFunctionQ(a*b, x) == True
assert AlgebraicFunctionQ(x**2, x) == True
assert AlgebraicFunctionQ(x**2*a, x) == True
assert AlgebraicFunctionQ(x**2 + a, x) == True
assert AlgebraicFunctionQ(sin(x), x) == False
assert AlgebraicFunctionQ([], x) == True
assert AlgebraicFunctionQ([a, a*b], x) == True
assert AlgebraicFunctionQ([sin(x)], x) == False
def test_MonomialQ():
assert not MonomialQ(2*x**7 + 6, x)
assert MonomialQ(2*x**7, x)
assert not MonomialQ(2*x**7 + 5*x**3, x)
assert not MonomialQ([2*x**7 + 6, 2*x**7], x)
assert MonomialQ([2*x**7, 5*x**3], x)
def test_MonomialSumQ():
assert MonomialSumQ(2*x**7 + 6, x) == True
assert MonomialSumQ(x**2 + x**3 + 5*x, x) == True
def test_MinimumMonomialExponent():
assert MinimumMonomialExponent(x**2 + 5*x**2 + 3*x**5, x) == 2
assert MinimumMonomialExponent(x**2 + 5*x**2 + 1, x) == 0
def test_MonomialExponent():
assert MonomialExponent(3*x**7, x) == 7
assert not MonomialExponent(3+x**3, x)
def test_LinearMatchQ():
assert LinearMatchQ(2 + 3*x, x)
assert LinearMatchQ(3*x, x)
assert not LinearMatchQ(3*x**2, x)
def test_SimplerQ():
a1, b1 = symbols('a1 b1')
assert SimplerQ(a1, b1)
assert SimplerQ(2*a, a + 2)
assert SimplerQ(2, x)
assert not SimplerQ(x**2, x)
assert SimplerQ(2*x, x + 2 + 6*x**3)
def test_GeneralizedTrinomialParts():
assert not GeneralizedTrinomialParts((7 + 2*x**6 + 3*x**12), x)
assert GeneralizedTrinomialParts(x**2 + x**3 + x**4, x) == [1, 1, 1, 3, 2]
assert not GeneralizedTrinomialParts(2*x + 3*x + 4*x, x)
def test_TrinomialQ():
assert TrinomialQ((7 + 2*x**6 + 3*x**12), x)
assert not TrinomialQ(x**2, x)
def test_GeneralizedTrinomialDegree():
assert not GeneralizedTrinomialDegree((7 + 2*x**6 + 3*x**12), x)
assert GeneralizedTrinomialDegree(x**2 + x**3 + x**4, x) == 1
def test_GeneralizedBinomialParts():
assert GeneralizedBinomialParts(3*x*(3 + x**6), x) == [9, 3, 7, 1]
assert GeneralizedBinomialParts((3*x + x**7), x) == [3, 1, 7, 1]
def test_GeneralizedBinomialDegree():
assert GeneralizedBinomialDegree(3*x*(3 + x**6), x) == 6
assert GeneralizedBinomialDegree((3*x + x**7), x) == 6
def test_PowerOfLinearQ():
assert PowerOfLinearQ((6*x), x)
assert not PowerOfLinearQ((3 + 6*x**3), x)
assert PowerOfLinearQ((3 + 6*x)**3, x)
def test_LinearPairQ():
assert not LinearPairQ(6*x**2 + 4, 3*x**2 + 2, x)
assert LinearPairQ(6*x + 4, 3*x + 2, x)
assert not LinearPairQ(6*x, 3*x + 2, x)
assert LinearPairQ(6*x, 3*x, x)
def test_LeadTerm():
assert LeadTerm(a*b*c) == a*b*c
assert LeadTerm(a + b + c) == a
def test_RemainingTerms():
assert RemainingTerms(a*b*c) == a*b*c
assert RemainingTerms(a + b + c) == b + c
def test_LeadFactor():
assert LeadFactor(a*b*c) == a
assert LeadFactor(a + b + c) == a + b + c
assert LeadFactor(b*I) == I
assert LeadFactor(c*a**b) == a**b
assert LeadFactor(S(2)) == S(2)
def test_RemainingFactors():
assert RemainingFactors(a*b*c) == b*c
assert RemainingFactors(a + b + c) == 1
assert RemainingFactors(a*I) == a
def test_LeadBase():
assert LeadBase(a**b) == a
assert LeadBase(a**b*c) == a
def test_LeadDegree():
assert LeadDegree(a**b) == b
assert LeadDegree(a**b*c) == b
def test_Numer():
assert Numer(a/b) == a
assert Numer(a**(-2)) == 1
assert Numer(a**(-2)*a/b) == 1
def test_Denom():
assert Denom(a/b) == b
assert Denom(a**(-2)) == a**2
assert Denom(a**(-2)*a/b) == a*b
def test_Coeff():
assert Coeff(7 + 2*x + 4*x**3, x, 1) == 2
assert Coeff(a + b*x + c*x**3, x, 0) == a
assert Coeff(a + b*x + c*x**3, x, 4) == 0
assert Coeff(b*x + c*x**3, x, 3) == c
def test_MergeMonomials():
assert MergeMonomials(x**2*(1 + 1*x)**3*(1 + 1*x)**n, x) == x**2*(x + 1)**(n + 3)
assert MergeMonomials(x**2*(1 + 1*x)**2*(1*(1 + 1*x)**1)**2, x) == x**2*(x + 1)**4
assert MergeMonomials(b**2/a**3, x) == b**2/a**3
def test_RationalFunctionQ():
assert RationalFunctionQ(a, x)
assert RationalFunctionQ(x**2, x)
assert RationalFunctionQ(x**3 + x**4, x)
assert RationalFunctionQ(x**3*S(2), x)
assert not RationalFunctionQ(x**3 + x**(0.5), x)
assert not RationalFunctionQ(x**(S(2)/3)*(a + b*x)**2, x)
def test_Apart():
assert Apart(1/(x**2*(a + b*x)**2), x) == b**2/(a**2*(a + b*x)**2) + 1/(a**2*x**2) + 2*b**2/(a**3*(a + b*x)) - 2*b/(a**3*x)
assert Apart(x**(S(2)/3)*(a + b*x)**2, x) == x**(S(2)/3)*(a + b*x)**2
def test_RationalFunctionFactors():
assert RationalFunctionFactors(a, x) == a
assert RationalFunctionFactors(sqrt(x), x) == 1
assert RationalFunctionFactors(x*x**3, x) == x*x**3
assert RationalFunctionFactors(x*sqrt(x), x) == 1
def test_NonrationalFunctionFactors():
assert NonrationalFunctionFactors(x, x) == 1
assert NonrationalFunctionFactors(sqrt(x), x) == sqrt(x)
assert NonrationalFunctionFactors(sqrt(x)*rubi_log(x), x) == sqrt(x)*rubi_log(x)
def test_Reverse():
assert Reverse([1, 2, 3]) == [3, 2, 1]
assert Reverse(a**b) == b**a
def test_RationalFunctionExponents():
assert RationalFunctionExponents(sqrt(x), x) == [0, 0]
assert RationalFunctionExponents(a, x) == [0, 0]
assert RationalFunctionExponents(x, x) == [1, 0]
assert RationalFunctionExponents(x**(-1), x)== [0, 1]
assert RationalFunctionExponents(x**(-1)*a, x) == [0, 1]
assert RationalFunctionExponents(x**(-1) + a, x) == [1, 1]
def test_PolynomialGCD():
assert PolynomialGCD(x**2 - 1, x**2 - 3*x + 2) == x - 1
def test_PolyGCD():
assert PolyGCD(x**2 - 1, x**2 - 3*x + 2, x) == x - 1
def test_AlgebraicFunctionFactors():
assert AlgebraicFunctionFactors(sin(x)*x, x) == x
assert AlgebraicFunctionFactors(sin(x), x) == 1
assert AlgebraicFunctionFactors(x, x) == x
def test_NonalgebraicFunctionFactors():
assert NonalgebraicFunctionFactors(sin(x)*x, x) == sin(x)
assert NonalgebraicFunctionFactors(sin(x), x) == sin(x)
assert NonalgebraicFunctionFactors(x, x) == 1
def test_QuotientOfLinearsP():
assert QuotientOfLinearsP((a + b*x)/(x), x)
assert QuotientOfLinearsP(x*a, x)
assert not QuotientOfLinearsP(x**2*a, x)
assert not QuotientOfLinearsP(x**2 + a, x)
assert QuotientOfLinearsP(x + a, x)
assert QuotientOfLinearsP(x, x)
assert QuotientOfLinearsP(1 + x, x)
def test_QuotientOfLinearsParts():
assert QuotientOfLinearsParts((b*x)/(c), x) == [0, b/c, 1, 0]
assert QuotientOfLinearsParts((b*x)/(c + x), x) == [0, b, c, 1]
assert QuotientOfLinearsParts((b*x)/(c + d*x), x) == [0, b, c, d]
assert QuotientOfLinearsParts((a + b*x)/(c + d*x), x) == [a, b, c, d]
assert QuotientOfLinearsParts(x**2 + a, x) == [a + x**2, 0, 1, 0]
assert QuotientOfLinearsParts(a/x, x) == [a, 0, 0, 1]
assert QuotientOfLinearsParts(1/x, x) == [1, 0, 0, 1]
assert QuotientOfLinearsParts(a*x + 1, x) == [1, a, 1, 0]
assert QuotientOfLinearsParts(x, x) == [0, 1, 1, 0]
assert QuotientOfLinearsParts(a, x) == [a, 0, 1, 0]
def test_QuotientOfLinearsQ():
assert not QuotientOfLinearsQ((a + x), x)
assert QuotientOfLinearsQ((a + x)/(x), x)
assert QuotientOfLinearsQ((a + b*x)/(x), x)
def test_Flatten():
assert Flatten([a, b, [c, [d, e]]]) == [a, b, c, d, e]
def test_Sort():
assert Sort([b, a, c]) == [a, b, c]
assert Sort([b, a, c], True) == [c, b, a]
def test_AbsurdNumberQ():
assert AbsurdNumberQ(S(1))
assert not AbsurdNumberQ(a*x)
assert not AbsurdNumberQ(a**(S(1)/2))
assert AbsurdNumberQ((S(1)/3)**(S(1)/3))
def test_AbsurdNumberFactors():
assert AbsurdNumberFactors(S(1)) == S(1)
assert AbsurdNumberFactors((S(1)/3)**(S(1)/3)) == S(3)**(S(2)/3)/S(3)
assert AbsurdNumberFactors(a) == S(1)
def test_NonabsurdNumberFactors():
assert NonabsurdNumberFactors(a) == a
assert NonabsurdNumberFactors(S(1)) == S(1)
assert NonabsurdNumberFactors(a*S(2)) == a
def test_NumericFactor():
assert NumericFactor(S(1)) == S(1)
assert NumericFactor(1*I) == S(1)
assert NumericFactor(S(1) + I) == S(1)
assert NumericFactor(a**(S(1)/3)) == S(1)
assert NumericFactor(a*S(3)) == S(3)
assert NumericFactor(a + b) == S(1)
def test_NonnumericFactors():
assert NonnumericFactors(S(3)) == S(1)
assert NonnumericFactors(I) == I
assert NonnumericFactors(S(3) + I) == S(3) + I
assert NonnumericFactors((S(1)/3)**(S(1)/3)) == S(1)
assert NonnumericFactors(rubi_log(a)) == rubi_log(a)
def test_Prepend():
assert Prepend([1, 2, 3], [4, 5]) == [4, 5, 1, 2, 3]
def test_SumSimplerQ():
assert not SumSimplerQ(S(4 + x),S(3 + x**3))
assert SumSimplerQ(S(4 + x), S(3 - x))
def test_SumSimplerAuxQ():
assert SumSimplerAuxQ(S(4 + x), S(3 - x))
assert not SumSimplerAuxQ(S(4), S(3))
def test_SimplerSqrtQ():
assert SimplerSqrtQ(S(2), S(16*x**3))
assert not SimplerSqrtQ(S(x*2), S(16))
assert not SimplerSqrtQ(S(-4), S(16))
assert SimplerSqrtQ(S(4), S(16))
assert not SimplerSqrtQ(S(4), S(0))
def test_TrinomialParts():
assert TrinomialParts((1 + 5*x**3)**2, x) == [1, 10, 25, 3]
assert TrinomialParts(1 + 5*x**3 + 2*x**6, x) == [1, 5, 2, 3]
assert TrinomialParts(((1 + 5*x**3)**2) + 6, x) == [7, 10, 25, 3]
assert not TrinomialParts(1 + 5*x**3 + 2*x**5, x)
def test_TrinomialDegree():
assert TrinomialDegree((7 + 2*x**6)**2, x) == 6
assert TrinomialDegree(1 + 5*x**3 + 2*x**6, x) == 3
assert not TrinomialDegree(1 + 5*x**3 + 2*x**5, x)
def test_CubicMatchQ():
assert not CubicMatchQ(S(3 + x**6), x)
assert CubicMatchQ(S(x**3), x)
assert not CubicMatchQ(S(3), x)
assert CubicMatchQ(S(3 + x**3), x)
assert CubicMatchQ(S(3 + x**3 + 2*x), x)
def test_BinomialMatchQ():
assert BinomialMatchQ(x, x)
assert BinomialMatchQ(2 + 3*x**5, x)
assert BinomialMatchQ(3*x**5, x)
assert BinomialMatchQ(3*x, x)
assert not BinomialMatchQ(x + x**2 + x**3, x)
def test_TrinomialMatchQ():
assert not TrinomialMatchQ((5 + 2*x**6)**2, x)
assert not TrinomialMatchQ((7 + 8*x**6), x)
assert TrinomialMatchQ((7 + 2*x**6 + 3*x**3), x)
assert TrinomialMatchQ(b*x**2 + c*x**4, x)
def test_GeneralizedBinomialMatchQ():
assert not GeneralizedBinomialMatchQ((1 + x**4), x)
assert GeneralizedBinomialMatchQ((3*x + x**7), x)
def test_QuadraticMatchQ():
assert not QuadraticMatchQ((a + b*x)*(c + d*x), x)
assert QuadraticMatchQ(x**2 + x, x)
assert QuadraticMatchQ(x**2+1+x, x)
assert QuadraticMatchQ(x**2, x)
def test_PowerOfLinearMatchQ():
assert PowerOfLinearMatchQ(x, x)
assert not PowerOfLinearMatchQ(S(6)**3, x)
assert not PowerOfLinearMatchQ(S(6 + 3*x**2)**3, x)
assert PowerOfLinearMatchQ(S(6 + 3*x)**3, x)
def test_GeneralizedTrinomialMatchQ():
assert not GeneralizedTrinomialMatchQ(7 + 2*x**6 + 3*x**12, x)
assert not GeneralizedTrinomialMatchQ(7 + 2*x**6 + 3*x**3, x)
assert not GeneralizedTrinomialMatchQ(7 + 2*x**6 + 3*x**5, x)
assert GeneralizedTrinomialMatchQ(x**2 + x**3 + x**4, x)
def test_QuotientOfLinearsMatchQ():
assert QuotientOfLinearsMatchQ((1 + x)*(3 + 4*x**2)/(2 + 4*x), x)
assert not QuotientOfLinearsMatchQ(x*(3 + 4*x**2)/(2 + 4*x**3), x)
assert QuotientOfLinearsMatchQ(x*(3 + 4*x)/(2 + 4*x), x)
assert QuotientOfLinearsMatchQ(2*(3 + 4*x)/(2 + 4*x), x)
def test_PolynomialTermQ():
assert not PolynomialTermQ(S(3), x)
assert PolynomialTermQ(3*x**6, x)
assert not PolynomialTermQ(3*x**6+5*x, x)
def test_PolynomialTerms():
assert PolynomialTerms(x + 6*x**3 + rubi_log(x), x) == 6*x**3 + x
assert PolynomialTerms(x + 6*x**3 + 6*x, x) == 6*x**3 + 7*x
assert PolynomialTerms(x + 6*x**3 + 6, x) == 6*x**3 + x
def test_NonpolynomialTerms():
assert NonpolynomialTerms(x + 6*x**3 + rubi_log(x), x) == rubi_log(x)
assert NonpolynomialTerms(x + 6*x**3 + 6*x, x) == 0
assert NonpolynomialTerms(x + 6*x**3 + 6, x) == 6
def test_PseudoBinomialQ():
assert PseudoBinomialQ(3 + 5*(x)**6, x)
assert PseudoBinomialQ(3 + 5*(2 + 5*x)**6, x)
def test_PseudoBinomialParts():
assert PseudoBinomialParts(3 + 7*(1 + x)**6, x) == [3, 1, 7**(S(1)/S(6)), 7**(S(1)/S(6)), 6]
assert PseudoBinomialParts(3 + 7*(1 + x)**3, x) == [3, 1, 7**(S(1)/S(3)), 7**(S(1)/S(3)), 3]
assert not PseudoBinomialParts(3 + 7*(1 + x)**2, x)
assert PseudoBinomialParts(3 + 7*(x)**5, x) == [3, 1, 0, 7**(S(1)/S(5)), 5]
def test_PseudoBinomialPairQ():
assert not PseudoBinomialPairQ(3 + 5*(x)**6,3 + (x)**6, x)
assert not PseudoBinomialPairQ(3 + 5*(1 + x)**6,3 + (1 + x)**6, x)
def test_NormalizePseudoBinomial():
assert NormalizePseudoBinomial(3 + 5*(1 + x)**6, x) == 3+(5**(S(1)/S(6))+5**(S(1)/S(6))*x)**S(6)
assert NormalizePseudoBinomial(3 + 5*(x)**6, x) == 3+5*x**6
def test_CancelCommonFactors():
assert CancelCommonFactors(S(x*y*S(6))**S(6), S(x*y*S(6))) == [46656*x**6*y**6, 6*x*y]
assert CancelCommonFactors(S(y*6)**S(6), S(x*y*S(6))) == [46656*y**6, 6*x*y]
assert CancelCommonFactors(S(6), S(3)) == [6, 3]
def test_SimplerIntegrandQ():
assert SimplerIntegrandQ(S(5), 4*x, x)
assert not SimplerIntegrandQ(S(x + 5*x**3), S(x**2 + 3*x), x)
assert SimplerIntegrandQ(S(x + 8), S(x**2 + 3*x), x)
def test_Drop():
assert Drop([1, 2, 3, 4, 5, 6], [2, 4]) == [1, 5, 6]
assert Drop([1, 2, 3, 4, 5, 6], -3) == [1, 2, 3]
assert Drop([1, 2, 3, 4, 5, 6], 2) == [3, 4, 5, 6]
assert Drop(a*b*c, 1) == b*c
def test_SubstForInverseFunction():
assert SubstForInverseFunction(x, a, b, x) == b
assert SubstForInverseFunction(a, a, b, x) == a
assert SubstForInverseFunction(x**a, x**a, b, x) == x
assert SubstForInverseFunction(a*x**a, a, b, x) == a*b**a
def test_SubstForFractionalPower():
assert SubstForFractionalPower(a, b, n, c, x) == a
assert SubstForFractionalPower(x, b, n, c, x) == c
assert SubstForFractionalPower(a**(S(1)/2), a, n, b, x) == x**(n/2)
def test_CombineExponents():
assert True
def test_FractionalPowerOfSquareQ():
assert not FractionalPowerOfSquareQ(x)
assert not FractionalPowerOfSquareQ((a + b)**(S(2)/S(3)))
assert not FractionalPowerOfSquareQ((a + b)**(S(2)/S(3))*c)
assert FractionalPowerOfSquareQ(((a + b*x)**(S(2)))**(S(1)/3)) == (a + b*x)**S(2)
def test_FractionalPowerSubexpressionQ():
assert not FractionalPowerSubexpressionQ(x, a, x)
assert FractionalPowerSubexpressionQ(x**(S(2)/S(3)), a, x)
assert not FractionalPowerSubexpressionQ(b*a, a, x)
def test_FactorNumericGcd():
assert FactorNumericGcd(5*a**2*e**4 + 2*a*b*d*e**3 + 2*a*c*d**2*e**2 + b**2*d**2*e**2 - 6*b*c*d**3*e + 21*c**2*d**4) ==\
5*a**2*e**4 + 2*a*b*d*e**3 + 2*a*c*d**2*e**2 + b**2*d**2*e**2 - 6*b*c*d**3*e + 21*c**2*d**4
assert FactorNumericGcd(x**(S(2))) == x**S(2)
assert FactorNumericGcd(rubi_log(x)) == rubi_log(x)
assert FactorNumericGcd(rubi_log(x)*x) == x*rubi_log(x)
assert FactorNumericGcd(rubi_log(x) + x**S(2)) == rubi_log(x) + x**S(2)
def test_Apply():
assert Apply(List, [a, b, c]) == [a, b, c]
def test_TrigSimplify():
assert TrigSimplify(a*sin(x)**2 + a*cos(x)**2 + v) == a + v
assert TrigSimplify(a*sec(x)**2 - a*tan(x)**2 + v) == a + v
assert TrigSimplify(a*csc(x)**2 - a*cot(x)**2 + v) == a + v
assert TrigSimplify(S(1) - sin(x)**2) == cos(x)**2
assert TrigSimplify(1 + tan(x)**2) == sec(x)**2
assert TrigSimplify(1 + cot(x)**2) == csc(x)**2
assert TrigSimplify(-S(1) + sec(x)**2) == tan(x)**2
assert TrigSimplify(-1 + csc(x)**2) == cot(x)**2
def test_MergeFactors():
assert simplify(MergeFactors(b/(a - c)**3 , 8*c**3*(b*x + c)**(3/2)/(3*b**4) - 24*c**2*(b*x + c)**(5/2)/(5*b**4) + \
24*c*(b*x + c)**(7/2)/(7*b**4) - 8*(b*x + c)**(9/2)/(9*b**4)) - (8*c**3*(b*x + c)**1.5/(3*b**3) - 24*c**2*(b*x + c)**2.5/(5*b**3) + \
24*c*(b*x + c)**3.5/(7*b**3) - 8*(b*x + c)**4.5/(9*b**3))/(a - c)**3) == 0
assert MergeFactors(x, x) == x**2
assert MergeFactors(x*y, x) == x**2*y
def test_FactorInteger():
assert FactorInteger(2434500) == [(2, 2), (3, 2), (5, 3), (541, 1)]
def test_ContentFactor():
assert ContentFactor(a*b + a*c) == a*(b + c)
def test_Order():
assert Order(a, b) == 1
assert Order(b, a) == -1
assert Order(a, a) == 0
def test_FactorOrder():
assert FactorOrder(1, 1) == 0
assert FactorOrder(1, 2) == -1
assert FactorOrder(2, 1) == 1
assert FactorOrder(a, b) == 1
def test_Smallest():
assert Smallest([2, 1, 3, 4]) == 1
assert Smallest(1, 2) == 1
assert Smallest(-1, -2) == -2
def test_MostMainFactorPosition():
assert MostMainFactorPosition([S(1), S(2), S(3)]) == 1
assert MostMainFactorPosition([S(1), S(7), S(3), S(4), S(5)]) == 2
def test_OrderedQ():
assert OrderedQ([a, b])
assert not OrderedQ([b, a])
def test_MinimumDegree():
assert MinimumDegree(S(1), S(2)) == 1
assert MinimumDegree(S(1), sqrt(2)) == 1
assert MinimumDegree(sqrt(2), S(1)) == 1
assert MinimumDegree(sqrt(3), sqrt(2)) == sqrt(2)
assert MinimumDegree(sqrt(2), sqrt(2)) == sqrt(2)
def test_PositiveFactors():
assert PositiveFactors(S(0)) == 1
assert PositiveFactors(-S(1)) == S(1)
assert PositiveFactors(sqrt(2)) == sqrt(2)
assert PositiveFactors(-rubi_log(2)) == rubi_log(2)
assert PositiveFactors(sqrt(2)*S(-1)) == sqrt(2)
def test_NonpositiveFactors():
assert NonpositiveFactors(S(0)) == 0
assert NonpositiveFactors(-S(1)) == -1
assert NonpositiveFactors(sqrt(2)) == 1
assert NonpositiveFactors(-rubi_log(2)) == -1
def test_Sign():
assert Sign(S(0)) == 0
assert Sign(S(1)) == 1
assert Sign(-S(1)) == -1
def test_PolynomialInQ():
v = rubi_log(x)
assert PolynomialInQ(S(1), v, x)
assert PolynomialInQ(v, v, x)
assert PolynomialInQ(1 + v**2, v, x)
assert PolynomialInQ(1 + a*v**2, v, x)
assert not PolynomialInQ(sqrt(v), v, x)
def test_ExponentIn():
v = rubi_log(x)
assert ExponentIn(S(1), rubi_log(x), x) == 0
assert ExponentIn(S(1) + v, rubi_log(x), x) == 1
assert ExponentIn(S(1) + v + v**3, rubi_log(x), x) == 3
assert ExponentIn(S(2)*sqrt(v)*v**3, rubi_log(x), x) == 3.5
def test_PolynomialInSubst():
v = rubi_log(x)
assert PolynomialInSubst(S(1) + rubi_log(x)**3, rubi_log(x), x) == 1 + x**3
assert PolynomialInSubst(S(1) + rubi_log(x), rubi_log(x), x) == x + 1
def test_Distrib():
assert Distrib(x, a) == x*a
assert Distrib(x, a + b) == a*x + b*x
def test_DistributeDegree():
assert DistributeDegree(x, m) == x**m
assert DistributeDegree(x**a, m) == x**(a*m)
assert DistributeDegree(a*b, m) == a**m * b**m
def test_FunctionOfPower():
assert FunctionOfPower(a, x) == None
assert FunctionOfPower(x, x) == 1
assert FunctionOfPower(x**3, x) == 3
assert FunctionOfPower(x**3*cos(x**6), x) == 3
def test_DivideDegreesOfFactors():
assert DivideDegreesOfFactors(a**b, S(3)) == a**(b/3)
assert DivideDegreesOfFactors(a**b*c, S(3)) == a**(b/3)*c**(S(1)/3)
def test_MonomialFactor():
assert MonomialFactor(a, x) == [0, a]
assert MonomialFactor(x, x) == [1, 1]
assert MonomialFactor(x + y, x) == [0, x + y]
assert MonomialFactor(rubi_log(x), x) == [0, rubi_log(x)]
assert MonomialFactor(rubi_log(x)*x, x) == [1, rubi_log(x)]
def test_NormalizeIntegrand():
assert NormalizeIntegrand((x**2 + 8), x) == x**2 + 8
assert NormalizeIntegrand((x**2 + 3*x)**2, x) == x**2*(x + 3)**2
assert NormalizeIntegrand(a**2*(a + b*x)**2, x) == a**2*(a + b*x)**2
assert NormalizeIntegrand(b**2/(a**2*(a + b*x)**2), x) == b**2/(a**2*(a + b*x)**2)
def test_NormalizeIntegrandAux():
v = (6*A*a*c - 2*A*b**2 + B*a*b)/(a*x**2) - (6*A*a**2*c**2 - 10*A*a*b**2*c - 8*A*a*b*c**2*x + 2*A*b**4 + 2*A*b**3*c*x + 5*B*a**2*b*c + 4*B*a**2*c**2*x - B*a*b**3 - B*a*b**2*c*x)/(a**2*(a + b*x + c*x**2)) + (-2*A*b + B*a)*(4*a*c - b**2)/(a**2*x)
assert NormalizeIntegrandAux(v, x) == (6*A*a*c - 2*A*b**2 + B*a*b)/(a*x**2) - (6*A*a**2*c**2 - 10*A*a*b**2*c + 2*A*b**4 + 5*B*a**2*b*c - B*a*b**3 + x*(-8*A*a*b*c**2 + 2*A*b**3*c + 4*B*a**2*c**2 - B*a*b**2*c))/(a**2*(a + b*x + c*x**2)) + (-2*A*b + B*a)*(4*a*c - b**2)/(a**2*x)
assert NormalizeIntegrandAux((x**2 + 3*x)**2, x) == x**2*(x + 3)**2
assert NormalizeIntegrandAux((x**2 + 8), x) == x**2 + 8
def test_NormalizeIntegrandFactor():
assert NormalizeIntegrandFactor((3*x + x**3)**2, x) == x**2*(x**2 + 3)**2
assert NormalizeIntegrandFactor((x**2 + 8), x) == x**2 + 8
def test_NormalizeIntegrandFactorBase():
assert NormalizeIntegrandFactorBase((x**2 + 8)**3, x) == (x**2 + 8)**3
assert NormalizeIntegrandFactorBase((x**2 + 8), x) == x**2 + 8
assert NormalizeIntegrandFactorBase(a**2*(a + b*x)**2, x) == a**2*(a + b*x)**2
def test_AbsorbMinusSign():
assert AbsorbMinusSign((x + 2)**5*(x + 3)**5) == (-x - 3)**5*(x + 2)**5
assert AbsorbMinusSign((x + 2)**5*(x + 3)**2) == -(x + 2)**5*(x + 3)**2
def test_NormalizeLeadTermSigns():
assert NormalizeLeadTermSigns((-x + 3)*(x**2 + 3)) == (-x + 3)*(x**2 + 3)
assert NormalizeLeadTermSigns(x + 3) == x + 3
def test_SignOfFactor():
assert SignOfFactor(S(-x + 3)) == [1, -x + 3]
assert SignOfFactor(S(-x)) == [-1, x]
def test_NormalizePowerOfLinear():
assert NormalizePowerOfLinear((x + 3)**5, x) == (x + 3)**5
assert NormalizePowerOfLinear(((x + 3)**2) + 3, x) == x**2 + 6*x + 12
def test_SimplifyIntegrand():
assert SimplifyIntegrand((x**2 + 3)**2, x) == (x**2 + 3)**2
assert SimplifyIntegrand(x**2 + 3 + (x**6) + 6, x) == x**6 + x**2 + 9
def test_SimplifyTerm():
assert SimplifyTerm(a**2/b**2, x) == a**2/b**2
assert SimplifyTerm(-6*x/5 + (5*x + 3)**2/25 - 9/25, x) == x**2
def test_TogetherSimplify():
assert TogetherSimplify(-6*x/5 + (5*x + 3)**2/25 - 9/25) == x**2
def test_ExpandToSum():
qq = 6
Pqq = e**3
Pq = (d+e*x**2)**3
aa = 2
nn = 2
cc = 1
pp = -1/2
bb = 3
assert nsimplify(ExpandToSum(Pq - Pqq*x**qq - Pqq*(aa*x**(-2*nn + qq)*(-2*nn + qq + 1) + bb*x**(-nn + qq)*(nn*(pp - 1) + qq + 1))/(cc*(2*nn*pp + qq + 1)), x) - \
(d**3 + x**4*(3*d*e**2 - 2.4*e**3) + x**2*(3*d**2*e - 1.2*e**3))) == 0
assert ExpandToSum(x**2 + 3*x + 3, x**3 + 3, x) == x**3*(x**2 + 3*x + 3) + 3*x**2 + 9*x + 9
assert ExpandToSum(x**3 + 6, x) == x**3 + 6
assert ExpandToSum(S(x**2 + 3*x + 3)*3, x) == 3*x**2 + 9*x + 9
assert ExpandToSum((a + b*x), x) == a + b*x
def test_UnifySum():
assert UnifySum((3 + x + 6*x**3 + sin(x)), x) == 6*x**3 + x + sin(x) + 3
assert UnifySum((3 + x + 6*x**3)*3, x) == 18*x**3 + 3*x + 9
def test_FunctionOfInverseLinear():
assert FunctionOfInverseLinear((x)/(a + b*x), x) == [a, b]
assert FunctionOfInverseLinear((c + d*x)/(a + b*x), x) == [a, b]
assert not FunctionOfInverseLinear(1/(a + b*x), x)
def test_PureFunctionOfSinhQ():
v = rubi_log(x)
f = sinh(v)
assert PureFunctionOfSinhQ(f, v, x)
assert not PureFunctionOfSinhQ(cosh(v), v, x)
assert PureFunctionOfSinhQ(f**2, v, x)
def test_PureFunctionOfTanhQ():
v = rubi_log(x)
f = tanh(v)
assert PureFunctionOfTanhQ(f, v, x)
assert not PureFunctionOfTanhQ(cosh(v), v, x)
assert PureFunctionOfTanhQ(f**2, v, x)
def test_PureFunctionOfCoshQ():
v = rubi_log(x)
f = cosh(v)
assert PureFunctionOfCoshQ(f, v, x)
assert not PureFunctionOfCoshQ(sinh(v), v, x)
assert PureFunctionOfCoshQ(f**2, v, x)
def test_IntegerQuotientQ():
u = S(2)*sin(x)
v = sin(x)
assert IntegerQuotientQ(u, v)
assert IntegerQuotientQ(u, u)
assert not IntegerQuotientQ(S(1), S(2))
def test_OddQuotientQ():
u = S(3)*sin(x)
v = sin(x)
assert OddQuotientQ(u, v)
assert OddQuotientQ(u, u)
assert not OddQuotientQ(S(1), S(2))
def test_EvenQuotientQ():
u = S(2)*sin(x)
v = sin(x)
assert EvenQuotientQ(u, v)
assert not EvenQuotientQ(u, u)
assert not EvenQuotientQ(S(1), S(2))
def test_FunctionOfSinhQ():
v = rubi_log(x)
assert FunctionOfSinhQ(cos(sinh(v)), v, x)
assert FunctionOfSinhQ(sinh(v), v, x)
assert FunctionOfSinhQ(sinh(v)*cos(sinh(v)), v, x)
def test_FunctionOfCoshQ():
v = rubi_log(x)
assert FunctionOfCoshQ(cos(cosh(v)), v, x)
assert FunctionOfCoshQ(cosh(v), v, x)
assert FunctionOfCoshQ(cosh(v)*cos(cosh(v)), v, x)
def test_FunctionOfTanhQ():
v = rubi_log(x)
t = Tanh(v)
c = Coth(v)
assert FunctionOfTanhQ(t, v, x)
assert FunctionOfTanhQ(c, v, x)
assert FunctionOfTanhQ(t + c, v, x)
assert FunctionOfTanhQ(t*c, v, x)
assert not FunctionOfTanhQ(sin(x), v, x)
def test_FunctionOfTanhWeight():
v = rubi_log(x)
t = Tanh(v)
c = Coth(v)
assert FunctionOfTanhWeight(x, v, x) == 0
assert FunctionOfTanhWeight(sinh(v), v, x) == 0
assert FunctionOfTanhWeight(tanh(v), v, x) == 1
assert FunctionOfTanhWeight(coth(v), v, x) == -1
assert FunctionOfTanhWeight(t**2, v, x) == 1
assert FunctionOfTanhWeight(sinh(v)**2, v, x) == -1
assert FunctionOfTanhWeight(coth(v)*sinh(v)**2, v, x) == -2
def test_FunctionOfHyperbolicQ():
v = rubi_log(x)
s = Sinh(v)
t = Tanh(v)
assert not FunctionOfHyperbolicQ(x, v, x)
assert FunctionOfHyperbolicQ(s + t, v, x)
assert FunctionOfHyperbolicQ(sinh(t), v, x)
def test_SmartNumerator():
assert SmartNumerator(x**(-2)) == 1
assert SmartNumerator(x**(2)*a) == x**2*a
def test_SmartDenominator():
assert SmartDenominator(x**(-2)) == x**2
assert SmartDenominator(x**(-2)*1/S(3)) == x**2*3
def test_SubstForAux():
v = rubi_log(x)
assert SubstForAux(v, v, x) == x
assert SubstForAux(v**2, v, x) == x**2
assert SubstForAux(x, v, x) == x
assert SubstForAux(v**2, v**4, x) == sqrt(x)
assert SubstForAux(v**2*v, v, x) == x**3
def test_SubstForTrig():
v = rubi_log(x)
s, c, t = sin(v), cos(v), tan(v)
assert SubstForTrig(cos(a/2 + b*x/2), x/sqrt(x**2 + 1), 1/sqrt(x**2 + 1), a/2 + b*x/2, x) == 1/sqrt(x**2 + 1)
assert SubstForTrig(s, sin, cos, v, x) == sin
assert SubstForTrig(t, sin(v), cos(v), v, x) == sin(rubi_log(x))/cos(rubi_log(x))
assert SubstForTrig(sin(2*v), sin(x), cos(x), v, x) == 2*sin(x)*cos(x)
assert SubstForTrig(s*t, sin(x), cos(x), v, x) == sin(x)**2/cos(x)
def test_SubstForHyperbolic():
v = rubi_log(x)
s, c, t = sinh(v), cosh(v), tanh(v)
assert SubstForHyperbolic(s, sinh(x), cosh(x), v, x) == sinh(x)
assert SubstForHyperbolic(t, sinh(x), cosh(x), v, x) == sinh(x)/cosh(x)
assert SubstForHyperbolic(sinh(2*v), sinh(x), cosh(x), v, x) == 2*sinh(x)*cosh(x)
assert SubstForHyperbolic(s*t, sinh(x), cosh(x), v, x) == sinh(x)**2/cosh(x)
def test_SubstForFractionalPowerOfLinear():
u = a + b*x
assert not SubstForFractionalPowerOfLinear(u, x)
assert not SubstForFractionalPowerOfLinear(u**(S(2)), x)
assert SubstForFractionalPowerOfLinear(u**(S(1)/2), x) == [x**2, 2, a + b*x, 1/b]
def test_InverseFunctionOfLinear():
u = a + b*x
assert InverseFunctionOfLinear(rubi_log(u)*sin(x), x) == rubi_log(u)
assert InverseFunctionOfLinear(rubi_log(u), x) == rubi_log(u)
def test_InertTrigQ():
s = sin(x)
c = cos(x)
assert not InertTrigQ(sin(x), csc(x), cos(h))
assert InertTrigQ(sin(x), csc(x))
assert not InertTrigQ(s, c)
assert InertTrigQ(c)
def test_PowerOfInertTrigSumQ():
func = sin
assert PowerOfInertTrigSumQ((1 + S(2)*(S(3)*func(x**2))**S(5))**3, func, x)
assert PowerOfInertTrigSumQ((1 + 2*(S(3)*func(x**2))**3 + 4*(S(5)*func(x**2))**S(3))**2, func, x)
def test_PiecewiseLinearQ():
assert PiecewiseLinearQ(a + b*x, x)
assert not PiecewiseLinearQ(Log(c*sin(a)**S(3)), x)
assert not PiecewiseLinearQ(x**3, x)
assert PiecewiseLinearQ(atanh(tanh(a + b*x)), x)
assert PiecewiseLinearQ(tanh(atanh(a + b*x)), x)
assert not PiecewiseLinearQ(coth(atanh(a + b*x)), x)
def test_KnownTrigIntegrandQ():
func = sin(a + b*x)
assert KnownTrigIntegrandQ([sin], S(1), x)
assert KnownTrigIntegrandQ([sin], (a + b*func)**m, x)
assert KnownTrigIntegrandQ([sin], (a + b*func)**m*(1 + 2*func), x)
assert KnownTrigIntegrandQ([sin], a + c*func**2, x)
assert KnownTrigIntegrandQ([sin], a + b*func + c*func**2, x)
assert KnownTrigIntegrandQ([sin], (a + b*func)**m*(c + d*func**2), x)
assert KnownTrigIntegrandQ([sin], (a + b*func)**m*(c + d*func + e*func**2), x)
assert not KnownTrigIntegrandQ([cos], (a + b*func)**m, x)
def test_KnownSineIntegrandQ():
assert KnownSineIntegrandQ((a + b*sin(a + b*x))**m, x)
def test_KnownTangentIntegrandQ():
assert KnownTangentIntegrandQ((a + b*tan(a + b*x))**m, x)
def test_KnownCotangentIntegrandQ():
assert KnownCotangentIntegrandQ((a + b*cot(a + b*x))**m, x)
def test_KnownSecantIntegrandQ():
assert KnownSecantIntegrandQ((a + b*sec(a + b*x))**m, x)
def test_TryPureTanSubst():
assert TryPureTanSubst(atan(c*(a + b*tan(a + b*x))), x)
assert TryPureTanSubst(atanh(c*(a + b*cot(a + b*x))), x)
assert not TryPureTanSubst(tan(c*(a + b*cot(a + b*x))), x)
def test_TryPureTanhSubst():
assert not TryPureTanhSubst(rubi_log(x), x)
assert TryPureTanhSubst(sin(x), x)
assert not TryPureTanhSubst(atanh(a*tanh(x)), x)
assert not TryPureTanhSubst((a + b*x)**S(2), x)
def test_TryTanhSubst():
assert not TryTanhSubst(rubi_log(x), x)
assert not TryTanhSubst(a*(b + c)**3, x)
assert not TryTanhSubst(1/(a + b*sinh(x)**S(3)), x)
assert not TryTanhSubst(sinh(S(3)*x)*cosh(S(4)*x), x)
assert not TryTanhSubst(a*(b*sech(x)**3)**c, x)
def test_GeneralizedBinomialQ():
assert GeneralizedBinomialQ(a*x**q + b*x**n, x)
assert not GeneralizedBinomialQ(a*x**q, x)
def test_GeneralizedTrinomialQ():
assert not GeneralizedTrinomialQ(7 + 2*x**6 + 3*x**12, x)
assert not GeneralizedTrinomialQ(a*x**q + c*x**(2*n-q), x)
def test_SubstForFractionalPowerOfQuotientOfLinears():
assert SubstForFractionalPowerOfQuotientOfLinears(((a + b*x)/(c + d*x))**(S(3)/2), x) == [x**4/(b - d*x**2)**2, 2, (a + b*x)/(c + d*x), -a*d + b*c]
def test_SubstForFractionalPowerQ():
assert SubstForFractionalPowerQ(x, sin(x), x)
assert SubstForFractionalPowerQ(x**2, sin(x), x)
assert not SubstForFractionalPowerQ(x**(S(3)/2), sin(x), x)
assert SubstForFractionalPowerQ(sin(x)**(S(3)/2), sin(x), x)
def test_AbsurdNumberGCD():
assert AbsurdNumberGCD(S(4)) == 4
assert AbsurdNumberGCD(S(4), S(8), S(12)) == 4
assert AbsurdNumberGCD(S(2), S(3), S(12)) == 1
def test_TrigReduce():
assert TrigReduce(cos(x)**2) == cos(2*x)/2 + 1/2
assert TrigReduce(cos(x)**2*sin(x)) == sin(x)/4 + sin(3*x)/4
assert TrigReduce(cos(x)**2+sin(x)) == sin(x) + cos(2*x)/2 + 1/2
assert TrigReduce(cos(x)**2*sin(x)**5) == 5*sin(x)/64 + sin(3*x)/64 - 3*sin(5*x)/64 + sin(7*x)/64
assert TrigReduce(2*sin(x)*cos(x) + 2*cos(x)**2) == sin(2*x) + cos(2*x) + 1
assert TrigReduce(sinh(a + b*x)**2) == cosh(2*a + 2*b*x)/2 - 1/2
assert TrigReduce(sinh(a + b*x)*cosh(a + b*x)) == sinh(2*a + 2*b*x)/2
def test_FunctionOfDensePolynomialsQ():
assert FunctionOfDensePolynomialsQ(x**2 + 3, x)
assert not FunctionOfDensePolynomialsQ(x**2, x)
assert not FunctionOfDensePolynomialsQ(x, x)
assert FunctionOfDensePolynomialsQ(S(2), x)
def test_PureFunctionOfSinQ():
v = rubi_log(x)
f = sin(v)
assert PureFunctionOfSinQ(f, v, x)
assert not PureFunctionOfSinQ(cos(v), v, x)
assert PureFunctionOfSinQ(f**2, v, x)
def test_PureFunctionOfTanQ():
v = rubi_log(x)
f = tan(v)
assert PureFunctionOfTanQ(f, v, x)
assert not PureFunctionOfTanQ(cos(v), v, x)
assert PureFunctionOfTanQ(f**2, v, x)
def test_PowerVariableSubst():
assert PowerVariableSubst((2*x)**3, 2, x) == 8*x**(3/2)
assert PowerVariableSubst((2*x)**3, 2, x) == 8*x**(3/2)
assert PowerVariableSubst((2*x), 2, x) == 2*x
assert PowerVariableSubst((2*x)**3, 2, x) == 8*x**(3/2)
assert PowerVariableSubst((2*x)**7, 2, x) == 128*x**(7/2)
assert PowerVariableSubst((6+2*x)**7, 2, x) == (2*x + 6)**7
assert PowerVariableSubst((2*x)**7+3, 2, x) == 128*x**(7/2) + 3
def test_PowerVariableDegree():
assert PowerVariableDegree(S(2), 0, 2*x, x) == [0, 2*x]
assert PowerVariableDegree((2*x)**2, 0, 2*x, x) == [2, 1]
assert PowerVariableDegree(x**2, 0, 2*x, x) == [2, 1]
assert PowerVariableDegree(S(4), 0, 2*x, x) == [0, 2*x]
def test_PowerVariableExpn():
assert not PowerVariableExpn((x)**3, 2, x)
assert not PowerVariableExpn((2*x)**3, 2, x)
assert PowerVariableExpn((2*x)**2, 4, x) == [4*x**3, 2, 1]
def test_FunctionOfQ():
assert FunctionOfQ(x**2, sqrt(-exp(2*x**2) + 1)*exp(x**2),x)
assert not FunctionOfQ(S(x**3), x*2, x)
assert FunctionOfQ(S(a), x*2, x)
assert FunctionOfQ(S(3*x), x*2, x)
def test_ExpandTrigExpand():
assert ExpandTrigExpand(1, cos(x), x**2, 2, 2, x) == 4*cos(x**2)**4 - 4*cos(x**2)**2 + 1
assert ExpandTrigExpand(1, cos(x) + sin(x), x**2, 2, 2, x) == 4*sin(x**2)**2*cos(x**2)**2 + 8*sin(x**2)*cos(x**2)**3 - 4*sin(x**2)*cos(x**2) + 4*cos(x**2)**4 - 4*cos(x**2)**2 + 1
def test_TrigToExp():
from sympy.integrals.rubi.utility_function import rubi_exp as exp
assert TrigToExp(sin(x)) == -I*(exp(I*x) - exp(-I*x))/2
assert TrigToExp(cos(x)) == exp(I*x)/2 + exp(-I*x)/2
assert TrigToExp(cos(x)*tan(x**2)) == I*(exp(I*x)/2 + exp(-I*x)/2)*(-exp(I*x**2) + exp(-I*x**2))/(exp(I*x**2) + exp(-I*x**2))
assert TrigToExp(cos(x) + sin(x)**2) == -(exp(I*x) - exp(-I*x))**2/4 + exp(I*x)/2 + exp(-I*x)/2
assert Simplify(TrigToExp(cos(x)*tan(x**S(2))*sin(x)**S(2))-(-I*(exp(I*x)/S(2) + exp(-I*x)/S(2))*(exp(I*x) - exp(-I*x))**S(2)*(-exp(I*x**S(2)) + exp(-I*x**S(2)))/(S(4)*(exp(I*x**S(2)) + exp(-I*x**S(2)))))) == 0
def test_ExpandTrigReduce():
assert ExpandTrigReduce(2*cos(3 + x)**3, x) == 3*cos(x + 3)/2 + cos(3*x + 9)/2
assert ExpandTrigReduce(2*sin(x)**3+cos(2 + x), x) == 3*sin(x)/2 - sin(3*x)/2 + cos(x + 2)
assert ExpandTrigReduce(cos(x + 3)**2, x) == cos(2*x + 6)/2 + 1/2
def test_NormalizeTrig():
assert NormalizeTrig(S(2*sin(2 + x)), x) == 2*sin(x + 2)
assert NormalizeTrig(S(2*sin(2 + x)**3), x) == 2*sin(x + 2)**3
assert NormalizeTrig(S(2*sin((2 + x)**2)**3), x) == 2*sin(x**2 + 4*x + 4)**3
def test_FunctionOfTrigQ():
v = rubi_log(x)
s = sin(v)
t = tan(v)
assert not FunctionOfTrigQ(x, v, x)
assert FunctionOfTrigQ(s + t, v, x)
assert FunctionOfTrigQ(sin(t), v, x)
def test_RationalFunctionExpand():
assert RationalFunctionExpand(x**S(5)*(e + f*x)**n/(a + b*x**S(3)), x) == -a*x**2*(e + f*x)**n/(b*(a + b*x**3)) +\
e**2*(e + f*x)**n/(b*f**2) - 2*e*(e + f*x)**(n + 1)/(b*f**2) + (e + f*x)**(n + 2)/(b*f**2)
assert RationalFunctionExpand(x**S(3)*(S(2)*x + 2)**S(2)/(2*x**2 + 1), x) == 2*x**3 + 4*x**2 + x + (- x + 2)/(2*x**2 + 1) - 2
assert RationalFunctionExpand((a + b*x + c*x**4)*rubi_log(x)**3, x) == a*rubi_log(x)**3 + b*x*rubi_log(x)**3 + c*x**4*rubi_log(x)**3
assert RationalFunctionExpand(a + b*x + c*x**4, x) == a + b*x + c*x**4
def test_SameQ():
assert SameQ(1, 1, 1)
assert not SameQ(1, 1, 2)
def test_Map2():
assert Map2(Add, [a, b, c], [x, y, z]) == [a + x, b + y, c + z]
def test_ConstantFactor():
assert ConstantFactor(a + a*x**3, x) == [a, x**3 + 1]
assert ConstantFactor(a, x) == [a, 1]
assert ConstantFactor(x, x) == [1, x]
assert ConstantFactor(x**S(3), x) == [1, x**3]
assert ConstantFactor(x**(S(3)/2), x) == [1, x**(3/2)]
assert ConstantFactor(a*x**3, x) == [a, x**3]
assert ConstantFactor(a + x**3, x) == [1, a + x**3]
def test_CommonFactors():
assert CommonFactors([a, a, a]) == [a, 1, 1, 1]
assert CommonFactors([x*S(2), x**S(3)*S(2), sin(x)*x*S(2)]) == [2, x, x**3, x*sin(x)]
assert CommonFactors([x, x**S(3), sin(x)*x]) == [1, x, x**3, x*sin(x)]
assert CommonFactors([S(2), S(4), S(6)]) == [2, 1, 2, 3]
def test_FunctionOfLinear():
f = sin(a + b*x)
assert FunctionOfLinear(f, x) == [sin(x), a, b]
assert FunctionOfLinear(a + b*x, x) == [x, a, b]
assert not FunctionOfLinear(a, x)
def test_FunctionOfExponentialQ():
assert FunctionOfExponentialQ(exp(x + exp(x) + exp(exp(x))), x)
assert FunctionOfExponentialQ(a**(a + b*x), x)
assert FunctionOfExponentialQ(a**(b*x), x)
assert not FunctionOfExponentialQ(a**sin(a + b*x), x)
def test_FunctionOfExponential():
assert FunctionOfExponential(a**(a + b*x), x)
def test_FunctionOfExponentialFunction():
assert FunctionOfExponentialFunction(a**(a + b*x), x) == x
assert FunctionOfExponentialFunction(S(2)*a**(a + b*x), x) == 2*x
def test_FunctionOfTrig():
assert FunctionOfTrig(sin(x + 1), x + 1, x) == x + 1
assert FunctionOfTrig(sin(x), x) == x
assert not FunctionOfTrig(cos(x**2 + 1), x)
assert FunctionOfTrig(sin(a+b*x)**3, x) == a+b*x
def test_AlgebraicTrigFunctionQ():
assert AlgebraicTrigFunctionQ(sin(x + 3), x)
assert AlgebraicTrigFunctionQ(x, x)
assert AlgebraicTrigFunctionQ(x + 1, x)
assert AlgebraicTrigFunctionQ(sinh(x + 1), x)
assert AlgebraicTrigFunctionQ(sinh(x + 1)**2, x)
assert not AlgebraicTrigFunctionQ(sinh(x**2 + 1)**2, x)
def test_FunctionOfHyperbolic():
assert FunctionOfTrig(sin(x + 1), x + 1, x) == x + 1
assert FunctionOfTrig(sin(x), x) == x
assert not FunctionOfTrig(cos(x**2 + 1), x)
def test_FunctionOfExpnQ():
assert FunctionOfExpnQ(x, x, x) == 1
assert FunctionOfExpnQ(x**2, x, x) == 2
assert FunctionOfExpnQ(x**2.1, x, x) == 1
assert not FunctionOfExpnQ(x, x**2, x)
assert not FunctionOfExpnQ(x + 1, (x + 5)**2, x)
assert not FunctionOfExpnQ(x + 1, (x + 1)**2, x)
def test_PureFunctionOfCosQ():
v = rubi_log(x)
f = cos(v)
assert PureFunctionOfCosQ(f, v, x)
assert not PureFunctionOfCosQ(sin(v), v, x)
assert PureFunctionOfCosQ(f**2, v, x)
def test_PureFunctionOfCotQ():
v = rubi_log(x)
f = cot(v)
assert PureFunctionOfCotQ(f, v, x)
assert not PureFunctionOfCotQ(sin(v), v, x)
assert PureFunctionOfCotQ(f**2, v, x)
def test_FunctionOfSinQ():
v = rubi_log(x)
assert FunctionOfSinQ(cos(sin(v)), v, x)
assert FunctionOfSinQ(sin(v), v, x)
assert FunctionOfSinQ(sin(v)*cos(sin(v)), v, x)
def test_FunctionOfCosQ():
v = rubi_log(x)
assert FunctionOfCosQ(cos(cos(v)), v, x)
assert FunctionOfCosQ(cos(v), v, x)
assert FunctionOfCosQ(cos(v)*cos(cos(v)), v, x)
def test_FunctionOfTanQ():
v = rubi_log(x)
t = tan(v)
c = cot(v)
assert FunctionOfTanQ(t, v, x)
assert FunctionOfTanQ(c, v, x)
assert FunctionOfTanQ(t + c, v, x)
assert FunctionOfTanQ(t*c, v, x)
assert not FunctionOfTanQ(sin(x), v, x)
def test_FunctionOfTanWeight():
v = rubi_log(x)
t = tan(v)
c = cot(v)
assert FunctionOfTanWeight(x, v, x) == 0
assert FunctionOfTanWeight(sin(v), v, x) == 0
assert FunctionOfTanWeight(tan(v), v, x) == 1
assert FunctionOfTanWeight(cot(v), v, x) == -1
assert FunctionOfTanWeight(t**2, v, x) == 1
assert FunctionOfTanWeight(sin(v)**2, v, x) == -1
assert FunctionOfTanWeight(cot(v)*sin(v)**2, v, x) == -2
def test_OddTrigPowerQ():
assert not OddTrigPowerQ(sin(x)**3, 1, x)
assert OddTrigPowerQ(sin(3),1,x)
assert OddTrigPowerQ(sin(3*x),x,x)
assert OddTrigPowerQ(sin(3*x)**3,x,x)
def test_FunctionOfLog():
assert not FunctionOfLog(x**2*(a + b*x)**3*exp(-a - b*x) ,False, False, x)
assert FunctionOfLog(rubi_log(2*x**8)*2 + rubi_log(2*x**8) + 1, x) == [3*x + 1, 2*x**8, 8]
assert FunctionOfLog(rubi_log(2*x)**2,x) == [x**2, 2*x, 1]
assert FunctionOfLog(rubi_log(3*x**3)**2 + 1,x) == [x**2 + 1, 3*x**3, 3]
assert FunctionOfLog(rubi_log(2*x**8)*2,x) == [2*x, 2*x**8, 8]
assert not FunctionOfLog(2*sin(x)*2,x)
def test_EulerIntegrandQ():
assert EulerIntegrandQ((2*x + 3*((x + 1)**3)**1.5)**(-3), x)
assert not EulerIntegrandQ((2*x + (2*x**2)**2)**3, x)
assert not EulerIntegrandQ(3*x**2 + 5*x + 1, x)
def test_Divides():
assert not Divides(x, a*x**2, x)
assert Divides(x, a*x, x) == a
def test_EasyDQ():
assert EasyDQ(3*x**2, x)
assert EasyDQ(3*x**3 - 6, x)
assert EasyDQ(x**3, x)
assert EasyDQ(sin(x**rubi_log(3)), x)
def test_ProductOfLinearPowersQ():
assert ProductOfLinearPowersQ(S(1), x)
assert ProductOfLinearPowersQ((x + 1)**3, x)
assert not ProductOfLinearPowersQ((x**2 + 1)**3, x)
assert ProductOfLinearPowersQ(x + 1, x)
def test_Rt():
b = symbols('b')
assert Rt(-b**2, 4) == (-b**2)**(S(1)/S(4))
assert Rt(x**2, 2) == x
assert Rt(S(2 + 3*I), S(8)) == (2 + 3*I)**(1/8)
assert Rt(x**2 + 4 + 4*x, 2) == x + 2
assert Rt(S(8), S(3)) == 2
assert Rt(S(16807), S(5)) == 7
def test_NthRoot():
assert NthRoot(S(14580), S(3)) == 9*2**(S(2)/S(3))*5**(S(1)/S(3))
assert NthRoot(9, 2) == 3.0
assert NthRoot(81, 2) == 9.0
assert NthRoot(81, 4) == 3.0
def test_AtomBaseQ():
assert not AtomBaseQ(x**2)
assert AtomBaseQ(x**3)
assert AtomBaseQ(x)
assert AtomBaseQ(S(2)**3)
assert not AtomBaseQ(sin(x))
def test_SumBaseQ():
assert not SumBaseQ((x + 1)**2)
assert SumBaseQ((x + 1)**3)
assert SumBaseQ(3*x+3)
assert not SumBaseQ(x)
def test_NegSumBaseQ():
assert not NegSumBaseQ(-x + 1)
assert NegSumBaseQ(x - 1)
assert not NegSumBaseQ((x - 1)**2)
assert NegSumBaseQ((x - 1)**3)
def test_AllNegTermQ():
x = Symbol('x', negative=True)
assert AllNegTermQ(x)
assert not AllNegTermQ(x + 2)
assert AllNegTermQ(x - 2)
assert AllNegTermQ((x - 2)**3)
assert not AllNegTermQ((x - 2)**2)
def test_TrigSquareQ():
assert TrigSquareQ(sin(x)**2)
assert TrigSquareQ(cos(x)**2)
assert not TrigSquareQ(tan(x)**2)
def test_Inequality():
assert not Inequality(S('0'), Less, m, LessEqual, S('1'))
assert Inequality(S('0'), Less, S('1'))
assert Inequality(S('0'), Less, S('1'), LessEqual, S('5'))
def test_SplitProduct():
assert SplitProduct(OddQ, S(3)*x) == [3, x]
assert not SplitProduct(OddQ, S(2)*x)
def test_SplitSum():
assert SplitSum(FracPart, sin(x)) == [sin(x), 0]
assert SplitSum(FracPart, sin(x) + S(2)) == [sin(x), S(2)]
def test_Complex():
assert Complex(a, b) == a + I*b
def test_SimpFixFactor():
assert SimpFixFactor((a*c + b*c)**S(4), x) == (a*c + b*c)**4
assert SimpFixFactor((a*Complex(0, c) + b*Complex(0, d))**S(3), x) == -I*(a*c + b*d)**3
assert SimpFixFactor((a*Complex(0, d) + b*Complex(0, e) + c*Complex(0, f))**S(2), x) == -(a*d + b*e + c*f)**2
assert SimpFixFactor((a + b*x**(-1/S(2))*x**S(3))**S(3), x) == (a + b*x**(5/2))**3
assert SimpFixFactor((a*c + b*c**S(2)*x**S(2))**S(3), x) == c**3*(a + b*c*x**2)**3
assert SimpFixFactor((a*c**S(2) + b*c**S(1)*x**S(2))**S(3), x) == c**3*(a*c + b*x**2)**3
assert SimpFixFactor(a*cos(x)**2 + a*sin(x)**2 + v, x) == a*cos(x)**2 + a*sin(x)**2 + v
def test_SimplifyAntiderivative():
assert SimplifyAntiderivative(acoth(coth(x)), x) == x
assert SimplifyAntiderivative(a*x, x) == a*x
assert SimplifyAntiderivative(atanh(cot(x)), x) == atanh(2*sin(x)*cos(x))/2
assert SimplifyAntiderivative(a*cos(x)**2 + a*sin(x)**2 + v, x) == a*cos(x)**2 + a*sin(x)**2
def test_FixSimplify():
assert FixSimplify(x*Complex(0, a)*(v*Complex(0, b) + w)**S(3)) == a*x*(b*v - I*w)**3
def test_TrigSimplifyAux():
assert TrigSimplifyAux(a*cos(x)**2 + a*sin(x)**2 + v) == a + v
assert TrigSimplifyAux(x**2) == x**2
def test_SubstFor():
assert SubstFor(x**2 + 1, tanh(x), x) == tanh(x)
assert SubstFor(x**2, sinh(x), x) == sinh(sqrt(x))
def test_FresnelS():
assert FresnelS(oo) == 1/2
assert FresnelS(0) == 0
def test_FresnelC():
assert FresnelC(0) == 0
assert FresnelC(oo) == 1/2
def test_Erfc():
assert Erfc(0) == 1
assert Erfc(oo) == 0
def test_Erfi():
assert Erfi(oo) is oo
assert Erfi(0) == 0
def test_Gamma():
assert Gamma(u) == gamma(u)
def test_ElementaryFunctionQ():
assert ElementaryFunctionQ(x + y)
assert ElementaryFunctionQ(sin(x + y))
assert ElementaryFunctionQ(E**(x*a))
def test_Util_Part():
from sympy.integrals.rubi.utility_function import Util_Part
assert Util_Part(1, a + b).doit() == a
assert Util_Part(c, a + b).doit() == Util_Part(c, a + b)
def test_Part():
assert Part([1, 2, 3], 1) == 1
assert Part(a*b, 1) == a
def test_PolyLog():
assert PolyLog(a, b) == polylog(a, b)
def test_PureFunctionOfCothQ():
v = rubi_log(x)
assert PureFunctionOfCothQ(coth(v), v, x)
assert PureFunctionOfCothQ(a + coth(v), v, x)
assert not PureFunctionOfCothQ(sin(v), v, x)
def test_ExpandIntegrand():
assert ExpandIntegrand(sqrt(a + b*x**S(2) + c*x**S(4)), (f*x)**(S(3)/2)*(d + e*x**S(2)), x) == \
d*(f*x)**(3/2)*sqrt(a + b*x**2 + c*x**4) + e*(f*x)**(7/2)*sqrt(a + b*x**2 + c*x**4)/f**2
assert ExpandIntegrand((6*A*a*c - 2*A*b**2 + B*a*b - 2*c*x*(A*b - 2*B*a))/(x**2*(a + b*x + c*x**2)), x) == \
(6*A*a*c - 2*A*b**2 + B*a*b)/(a*x**2) + (-6*A*a**2*c**2 + 10*A*a*b**2*c - 2*A*b**4 - 5*B*a**2*b*c + B*a*b**3 + x*(8*A*a*b*c**2 - 2*A*b**3*c - 4*B*a**2*c**2 + B*a*b**2*c))/(a**2*(a + b*x + c*x**2)) + (-2*A*b + B*a)*(4*a*c - b**2)/(a**2*x)
assert ExpandIntegrand(x**2*(e + f*x)**3*F**(a + b*(c + d*x)**1), x) == F**(a + b*(c + d*x))*e**2*(e + f*x)**3/f**2 - 2*F**(a + b*(c + d*x))*e*(e + f*x)**4/f**2 + F**(a + b*(c + d*x))*(e + f*x)**5/f**2
assert ExpandIntegrand((x)*(a + b*x)**2*f**(e*(c + d*x)**n), x) == a**2*f**(e*(c + d*x)**n)*x + 2*a*b*f**(e*(c + d*x)**n)*x**2 + b**2*f**(e*(c + d*x)**n)*x**3
assert ExpandIntegrand(sin(x)**3*(a + b*(1/sin(x)))**2, x) == a**2*sin(x)**3 + 2*a*b*sin(x)**2 + b**2*sin(x)
assert ExpandIntegrand(x*(a + b*ArcSin(c + d*x))**n, x) == -c*(a + b*asin(c + d*x))**n/d + (a + b*asin(c + d*x))**n*(c + d*x)/d
assert ExpandIntegrand((a + b*x)**S(3)*(A + B*x)/(c + d*x), x) == B*(a + b*x)**3/d + b*(a + b*x)**2*(A*d - B*c)/d**2 + b*(a + b*x)*(A*d - B*c)*(a*d - b*c)/d**3 + b*(A*d - B*c)*(a*d - b*c)**2/d**4 + (A*d - B*c)*(a*d - b*c)**3/(d**4*(c + d*x))
assert ExpandIntegrand((x**2)*(S(3)*x)**(S(1)/2), x) ==sqrt(3)*x**(5/2)
assert ExpandIntegrand((x)*(sin(x))**(S(1)/2), x) == x*sqrt(sin(x))
assert ExpandIntegrand(x*(e + f*x)**2*F**(b*(c + d*x)), x) == -F**(b*(c + d*x))*e*(e + f*x)**2/f + F**(b*(c + d*x))*(e + f*x)**3/f
assert ExpandIntegrand(x**m*(e + f*x)**2*F**(b*(c + d*x)**n), x) == F**(b*(c + d*x)**n)*e**2*x**m + 2*F**(b*(c + d*x)**n)*e*f*x*x**m + F**(b*(c + d*x)**n)*f**2*x**2*x**m
assert simplify(ExpandIntegrand((S(1) - S(1)*x**S(2))**(-S(3)), x) - (-S(3)/(8*(x**2 - 1)) + S(3)/(16*(x + 1)**2) + S(1)/(S(8)*(x + 1)**3) + S(3)/(S(16)*(x - 1)**2) - S(1)/(S(8)*(x - 1)**3))) == 0
assert ExpandIntegrand(-S(1), 1/((-q - x)**3*(q - x)**3), x) == 1/(8*q**3*(q + x)**3) - 1/(8*q**3*(-q + x)**3) - 3/(8*q**4*(-q**2 + x**2)) + 3/(16*q**4*(q + x)**2) + 3/(16*q**4*(-q + x)**2)
assert ExpandIntegrand((1 + 1*x)**(3)/(2 + 1*x), x) == x**2 + x + 1 - 1/(x + 2)
assert ExpandIntegrand((c + d*x**1 + e*x**2)/(1 - x**3), x) == (c - (-1)**(S(1)/3)*d + (-1)**(S(2)/3)*e)/(-3*(-1)**(S(2)/3)*x + 3) + (c + (-1)**(S(2)/3)*d - (-1)**(S(1)/3)*e)/(3*(-1)**(S(1)/3)*x + 3) + (c + d + e)/(-3*x + 3)
assert ExpandIntegrand((c + d*x**1 + e*x**2 + f*x**3)/(1 - x**4), x) == (c + I*d - e - I*f)/(4*I*x + 4) + (c - I*d - e + I*f)/(-4*I*x + 4) + (c - d + e - f)/(4*x + 4) + (c + d + e + f)/(-4*x + 4)
assert ExpandIntegrand((d + e*(f + g*x))/(2 + 3*x + 1*x**2), x) == (-2*d - 2*e*f + 4*e*g)/(2*x + 4) + (2*d + 2*e*f - 2*e*g)/(2*x + 2)
assert ExpandIntegrand(x/(a*x**3 + b*Sqrt(c + d*x**6)), x) == a*x**4/(-b**2*c + x**6*(a**2 - b**2*d)) + b*x*sqrt(c + d*x**6)/(b**2*c + x**6*(-a**2 + b**2*d))
assert simplify(ExpandIntegrand(x**1*(1 - x**4)**(-2), x) - (x/(S(4)*(x**2 + 1)) + x/(S(4)*(x**2 + 1)**2) - x/(S(4)*(x**2 - 1)) + x/(S(4)*(x**2 - 1)**2))) == 0
assert simplify(ExpandIntegrand((-1 + x**S(6))**(-3), x) - (S(3)/(S(8)*(x**6 - 1)) - S(3)/(S(16)*(x**S(3) + S(1))**S(2)) - S(1)/(S(8)*(x**S(3) + S(1))**S(3)) - S(3)/(S(16)*(x**S(3) - S(1))**S(2)) + S(1)/(S(8)*(x**S(3) - S(1))**S(3)))) == 0
assert simplify(ExpandIntegrand(u**1*(a + b*u**2 + c*u**4)**(-1), x)) == simplify(1/(2*b*(u + sqrt(-(a + c*u**4)/b))) - 1/(2*b*(-u + sqrt(-(a + c*u**4)/b))))
assert simplify(ExpandIntegrand((1 + 1*u + 1*u**2)**(-2), x) - (S(1)/(S(2)*(-u - 1)*(-u**2 - u - 1)) + S(1)/(S(4)*(-u - 1)*(u + sqrt(-u - 1))**2) + S(1)/(S(4)*(-u - 1)*(u - sqrt(-u - 1))**2))) == 0
assert ExpandIntegrand(x*(a + b*Log(c*(d*(e + f*x)**p)**q))**n, x) == -e*(a + b*rubi_log(c*(d*(e + f*x)**p)**q))**n/f + (a + b*rubi_log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)/f
assert ExpandIntegrand(x*f**(e*(c + d*x)*S(1)), x) == f**(e*(c + d*x))*x
assert simplify(ExpandIntegrand((x)*(a + b*x)**m*Log(c*(d + e*x**n)**p), x) - (-a*(a + b*x)**m*rubi_log(c*(d + e*x**n)**p)/b + (a + b*x)**(m + S(1))*rubi_log(c*(d + e*x**n)**p)/b)) == 0
assert simplify(ExpandIntegrand(u*(a + b*F**v)**S(2)*(c + d*F**v)**S(-3), x) - (b**2*u/(d**2*(F**v*d + c)) + 2*b*u*(a*d - b*c)/(d**2*(F**v*d + c)**2) + u*(a*d - b*c)**2/(d**2*(F**v*d + c)**3))) == 0
assert ExpandIntegrand((S(1) + 1*x)**S(2)*f**(e*(1 + S(1)*x)**n)/(g + h*x), x) == f**(e*(x + 1)**n)*(x + 1)/h + f**(e*(x + 1)**n)*(-g + h)/h**2 + f**(e*(x + 1)**n)*(g - h)**2/(h**2*(g + h*x))
assert ExpandIntegrand((a*c - b*c*x)**2/(a + b*x)**2, x) == 4*a**2*c**2/(a + b*x)**2 - 4*a*c**2/(a + b*x) + c**2
assert simplify(ExpandIntegrand(x**2*(1 - 1*x**2)**(-2), x) - (1/(S(2)*(x**2 - 1)) + 1/(S(4)*(x + 1)**2) + 1/(S(4)*(x - 1)**2))) == 0
assert ExpandIntegrand((a + x)**2, x) == a**2 + 2*a*x + x**2
assert ExpandIntegrand((a + b*x)**S(2)/x**3, x) == a**2/x**3 + 2*a*b/x**2 + b**2/x
assert ExpandIntegrand(1/(x**2*(a + b*x)**2), x) == b**2/(a**2*(a + b*x)**2) + 1/(a**2*x**2) + 2*b**2/(a**3*(a + b*x)) - 2*b/(a**3*x)
assert ExpandIntegrand((1 + x)**3/x, x) == x**2 + 3*x + 3 + 1/x
assert ExpandIntegrand((1 + 2*(3 + 4*x**2))/(2 + 3*x**2 + 1*x**4), x) == 18/(2*x**2 + 4) - 2/(2*x**2 + 2)
assert ExpandIntegrand((c + d*x**2 + e*x**3)/(1 - 1*x**4), x) == (c - d - I*e)/(4*I*x + 4) + (c - d + I*e)/(-4*I*x + 4) + (c + d - e)/(4*x + 4) + (c + d + e)/(-4*x + 4)
assert ExpandIntegrand((a + b*x)**2/(c + d*x), x) == b*(a + b*x)/d + b*(a*d - b*c)/d**2 + (a*d - b*c)**2/(d**2*(c + d*x))
assert ExpandIntegrand(x**2*(a + b*Log(c*(d*(e + f*x)**p)**q))**n, x) == e**2*(a + b*rubi_log(c*(d*(e + f*x)**p)**q))**n/f**2 - 2*e*(a + b*rubi_log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)/f**2 + (a + b*rubi_log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**2/f**2
assert ExpandIntegrand(x*(1 + 2*x)**3*rubi_log(2*(1 + 1*x**2)**1), x) == 8*x**4*rubi_log(2*x**2 + 2) + 12*x**3*rubi_log(2*x**2 + 2) + 6*x**2*rubi_log(2*x**2 + 2) + x*rubi_log(2*x**2 + 2)
assert ExpandIntegrand((1 + 1*x)**S(3)*f**(e*(1 + 1*x)**n)/(g + h*x), x) == f**(e*(x + 1)**n)*(x + 1)**2/h + f**(e*(x + 1)**n)*(-g + h)*(x + 1)/h**2 + f**(e*(x + 1)**n)*(-g + h)**2/h**3 - f**(e*(x + 1)**n)*(g - h)**3/(h**3*(g + h*x))
def test_Dist():
assert Dist(x, a + b, x) == a*x + b*x
assert Dist(x, Integral(a + b , x), x) == x*Integral(a + b, x)
assert Dist(3*x,(a+b), x) - Dist(2*x, (a+b), x) == a*x + b*x
assert Dist(3*x,(a+b), x) + Dist(2*x, (a+b), x) == 5*a*x + 5*b*x
assert Dist(x, c*Integral((a + b), x), x) == c*x*Integral(a + b, x)
def test_IntegralFreeQ():
assert not IntegralFreeQ(Integral(a, x))
assert IntegralFreeQ(a + b)
def test_OneQ():
from sympy.integrals.rubi.utility_function import OneQ
assert OneQ(S(1))
assert not OneQ(S(2))
def test_DerivativeDivides():
assert not DerivativeDivides(x, x, x)
assert not DerivativeDivides(a, x + y, b)
assert DerivativeDivides(a + x, a, x) == a
assert DerivativeDivides(a + b, x + y, b) == x + y
def test_LogIntegral():
from sympy.integrals.rubi.utility_function import LogIntegral
assert LogIntegral(a) == li(a)
def test_SinIntegral():
from sympy.integrals.rubi.utility_function import SinIntegral
assert SinIntegral(a) == Si(a)
def test_CosIntegral():
from sympy.integrals.rubi.utility_function import CosIntegral
assert CosIntegral(a) == Ci(a)
def test_SinhIntegral():
from sympy.integrals.rubi.utility_function import SinhIntegral
assert SinhIntegral(a) == Shi(a)
def test_CoshIntegral():
from sympy.integrals.rubi.utility_function import CoshIntegral
assert CoshIntegral(a) == Chi(a)
def test_ExpIntegralEi():
from sympy.integrals.rubi.utility_function import ExpIntegralEi
assert ExpIntegralEi(a) == Ei(a)
def test_ExpIntegralE():
from sympy.integrals.rubi.utility_function import ExpIntegralE
assert ExpIntegralE(a, z) == expint(a, z)
def test_LogGamma():
from sympy.integrals.rubi.utility_function import LogGamma
assert LogGamma(a) == loggamma(a)
def test_Factorial():
from sympy.integrals.rubi.utility_function import Factorial
assert Factorial(S(5)) == 120
def test_Zeta():
from sympy.integrals.rubi.utility_function import Zeta
assert Zeta(a, z) == zeta(a, z)
def test_HypergeometricPFQ():
from sympy.integrals.rubi.utility_function import HypergeometricPFQ
assert HypergeometricPFQ([a, b], [c], z) == hyper([a, b], [c], z)
def test_PolyGamma():
assert PolyGamma(S(2), S(3)) == polygamma(2, 3)
def test_ProductLog():
from sympy.core.evalf import N
assert N(ProductLog(S(5.0)), 5) == N(1.32672466524220, 5)
assert N(ProductLog(S(2), S(3.5)), 5) == N(-1.14064876353898 + 10.8912237027092*I, 5)
def test_PolynomialQuotient():
assert PolynomialQuotient(rubi_log((-a*d + b*c)/(b*(c + d*x)))/(c + d*x), a + b*x, e) == rubi_log((-a*d + b*c)/(b*(c + d*x)))/((a + b*x)*(c + d*x))
assert PolynomialQuotient(x**2, x + a, x) == -a + x
def test_PolynomialRemainder():
assert PolynomialRemainder(rubi_log((-a*d + b*c)/(b*(c + d*x)))/(c + d*x), a + b*x, e) == 0
assert PolynomialRemainder(x**2, x + a, x) == a**2
def test_Floor():
assert Floor(S(7.5)) == 7
assert Floor(S(15.5), S(6)) == 12
def test_Factor():
from sympy.integrals.rubi.utility_function import Factor
assert Factor(a*b + a*c) == a*(b + c)
def test_Rule():
from sympy.integrals.rubi.utility_function import Rule
assert Rule(x, S(5)) == {x: 5}
def test_Distribute():
assert Distribute((a + b)*c + (a + b)*d, Add) == c*(a + b) + d*(a + b)
assert Distribute((a + b)*(c + e), Add) == a*c + a*e + b*c + b*e
def test_CoprimeQ():
assert CoprimeQ(S(7), S(5))
assert not CoprimeQ(S(6), S(3))
def test_Discriminant():
from sympy.integrals.rubi.utility_function import Discriminant
assert Discriminant(a*x**2 + b*x + c, x) == b**2 - 4*a*c
assert unchanged(Discriminant, 1/x, x)
def test_Sum_doit():
assert Sum_doit(2*x + 2, [x, 0, 1.7]) == 6
def test_DeactivateTrig():
assert DeactivateTrig(sec(a + b*x), x) == sec(a + b*x)
def test_Negative():
from sympy.integrals.rubi.utility_function import Negative
assert Negative(S(-2))
assert not Negative(S(0))
def test_Quotient():
from sympy.integrals.rubi.utility_function import Quotient
assert Quotient(17, 5) == 3
def test_process_trig():
assert process_trig(x*cot(x)) == x/tan(x)
assert process_trig(coth(x)*csc(x)) == S(1)/(tanh(x)*sin(x))
def test_replace_pow_exp():
assert replace_pow_exp(rubi_exp(S(5))) == exp(S(5))
def test_rubi_unevaluated_expr():
from sympy.integrals.rubi.utility_function import rubi_unevaluated_expr
assert rubi_unevaluated_expr(a)*rubi_unevaluated_expr(b) == rubi_unevaluated_expr(b)*rubi_unevaluated_expr(a)
def test_rubi_exp():
# class name in utility_function is `exp`. To avoid confusion `rubi_exp` has been used here
assert isinstance(rubi_exp(a), Pow)
def test_rubi_log():
# class name in utility_function is `log`. To avoid confusion `rubi_log` has been used here
assert rubi_log(rubi_exp(S(a))) == a
| 38.963913
| 279
| 0.60818
|
c8030fee76d6cbc801d010ff26f7e74778bcc02b
| 3,516
|
py
|
Python
|
test/GeneSet_Enrichment_server_test.py
|
kbasecollaborations/GeneSet_Enrichment
|
14a5e409019457bfbe985236ff103edb2e8896c7
|
[
"MIT"
] | null | null | null |
test/GeneSet_Enrichment_server_test.py
|
kbasecollaborations/GeneSet_Enrichment
|
14a5e409019457bfbe985236ff103edb2e8896c7
|
[
"MIT"
] | null | null | null |
test/GeneSet_Enrichment_server_test.py
|
kbasecollaborations/GeneSet_Enrichment
|
14a5e409019457bfbe985236ff103edb2e8896c7
|
[
"MIT"
] | 1
|
2020-01-17T20:26:32.000Z
|
2020-01-17T20:26:32.000Z
|
# -*- coding: utf-8 -*-
import os
import time
import re
import unittest
from configparser import ConfigParser
from GeneSet_Enrichment.GeneSet_EnrichmentImpl import GeneSet_Enrichment
from GeneSet_Enrichment.GeneSet_EnrichmentServer import MethodContext
from GeneSet_Enrichment.authclient import KBaseAuth as _KBaseAuth
from installed_clients.WorkspaceClient import Workspace
class GeneSet_EnrichmentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GeneSet_Enrichment'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'GeneSet_Enrichment',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = Workspace(cls.wsURL)
cls.serviceImpl = GeneSet_Enrichment(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
suffix = int(time.time() * 1000)
cls.wsName = "test_ContigFilter_" + str(suffix)
ret = cls.wsClient.create_workspace({'workspace': cls.wsName}) # noqa
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
    # NOTE: According to Python unittest naming rules, test method names should start with 'test'.  # noqa
def test_your_method(self):
# Prepare test objects in workspace if needed using
# self.getWsClient().save_objects({'workspace': self.getWsName(),
# 'objects': []})
#
# Run your method by
# ret = self.getImpl().your_method(self.getContext(), parameters...)
#
# Check returned data with
# self.assertEqual(ret[...], ...) or other unittest methods
params = {
'workspace_name': 'man4ish_gupta:narrative_1579638904735',
'genelist' : ['36575/30/1', '36575/10/4', '36575/209/5', '36575/214/1']
}
ret = self.serviceImpl.run_GeneSet_Enrichment(self.ctx, params)
def test_build_feature_set(self):
input_params = {
'genome': '35540/2/1',
'genes': "AT3G14440\nAT1G20440\nAT4G27520\nAT2G23120\nAT5G58070\nAT1G20450\nAT1G01470\nAT5G52310\nAT1G76180\nAT2G02100\nAT1G73480\nAT4G15480\nAT3G29575",
'description': "sameple geneset",
'workspace_name': 'man4ish_gupta:narrative_1579638904735',
'output_feature_set': 'new_feature_set',
}
result = self.serviceImpl.build_Featureset(self.ctx, input_params)
| 38.637363
| 165
| 0.617747
|
f6aa035a571f20263466b83366e6ff7a2706d8d4
| 2,175
|
py
|
Python
|
msbd/preprocessamento/riempire_na.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | 1
|
2019-02-17T09:28:04.000Z
|
2019-02-17T09:28:04.000Z
|
msbd/preprocessamento/riempire_na.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | null | null | null |
msbd/preprocessamento/riempire_na.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | null | null | null |
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
class RiempireNAMedia(TransformerMixin):
"""Riempie i valori mancanti utilizzando la media
Attributes
----------
media_dict_ : dict
Dizionario che associa ad ogni colonna il suo valore medio.
"""
def __init__(self):
pass
def fit(self, X, y=None):
        # DataFrame.mean() defaults to numeric_only=True
self.media_dict_ = X.mean().to_dict()
return self
def transform(self, X):
check_is_fitted(self, "media_dict_")
return X.fillna(self.media_dict_)
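# Minimal usage sketch (illustrative only; the DataFrame below is assumed,
# not part of this module):
#     import pandas as pd
#     df = pd.DataFrame({"a": [1.0, None, 3.0]})
#     imputer = RiempireNAMedia().fit(df)
#     imputer.transform(df)  # the NaN in "a" becomes 2.0, the column mean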
class RiempireNAItemWeight(TransformerMixin):
"""Riempie i valori mancanti di 'Item_Weight'
Attributes
----------
weight_dict_ : dict
Dizionario che associa ad ogni 'Item_Identifier' il suo
'Item_Weight' medio.
"""
def __init__(self):
pass
def fit(self, X, y=None):
self.weight_dict_ = X[["Item_Identifier", "Item_Weight"]].groupby(
"Item_Identifier").mean()["Item_Weight"].to_dict()
return self
def transform(self, X):
check_is_fitted(self, "weight_dict_")
value = {idx : self.weight_dict_.get(identifier) for idx, identifier in
zip(X.index, X["Item_Identifier"])}
X["Item_Weight"].fillna(value, inplace=True)
return X
class RiempireNAOutletSize(TransformerMixin):
"""Riempie i valori mancanti di 'Outlet_Size'"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
is_null = X["Outlet_Size"].isnull()
is_tier2 = X["Outlet_Location_Type"] == "Tier 2"
is_grocery = X["Outlet_Type"] == "Grocery Store"
is_type2 = X["Outlet_Type"] == "Supermarket Type2"
is_type3 = X["Outlet_Type"] == "Supermarket Type3"
        X.loc[is_null & (is_tier2 | is_grocery), "Outlet_Size"] = "Small"
X.loc[is_null & is_type2, "Outlet_Size"] = "Medium"
X.loc[is_null & is_type3, "Outlet_Size"] = "Medium"
return X
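# These transformers can be chained; a hedged sketch (DataFrame name assumed)
# using the fit_transform provided by TransformerMixin:
#     df = RiempireNAItemWeight().fit_transform(df)
#     df = RiempireNAOutletSize().fit_transform(df)
#     df = RiempireNAMedia().fit_transform(df)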
| 27.1875
| 79
| 0.625287
|
b5164dab19000bfe4c7cde9a7bceba449ed1bdc7
| 9,222
|
py
|
Python
|
pytorch3d/renderer/implicit/raymarching.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 5
|
2021-01-26T11:57:26.000Z
|
2021-06-24T14:56:07.000Z
|
pytorch3d/renderer/implicit/raymarching.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch3d/renderer/implicit/raymarching.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 1
|
2020-04-14T18:22:32.000Z
|
2020-04-14T18:22:32.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import warnings
from typing import Optional, Tuple, Union
import torch
class EmissionAbsorptionRaymarcher(torch.nn.Module):
"""
Raymarch using the Emission-Absorption (EA) algorithm.
The algorithm independently renders each ray by analyzing density and
feature values sampled at (typically uniformly) spaced 3D locations along
each ray. The density values `rays_densities` are of shape
`(..., n_points_per_ray)`, their values should range between [0, 1], and
represent the opaqueness of each point (the higher the less transparent).
The feature values `rays_features` of shape
`(..., n_points_per_ray, feature_dim)` represent the content of the
point that is supposed to be rendered in case the given point is opaque
(i.e. its density -> 1.0).
EA first utilizes `rays_densities` to compute the absorption function
along each ray as follows:
```
absorption = cumprod(1 - rays_densities, dim=-1)
```
The value of absorption at position `absorption[..., k]` specifies
how much light has reached `k`-th point along a ray since starting
its trajectory at `k=0`-th point.
Each ray is then rendered into a tensor `features` of shape `(..., feature_dim)`
by taking a weighed combination of per-ray features `rays_features` as follows:
```
weights = absorption * rays_densities
features = (rays_features * weights).sum(dim=-2)
```
Where `weights` denote a function that has a strong peak around the location
of the first surface point that a given ray passes through.
Note that for a perfectly bounded volume (with a strictly binary density),
the `weights = cumprod(1 - rays_densities, dim=-1) * rays_densities`
function would yield 0 everywhere. In order to prevent this,
the result of the cumulative product is shifted `self.surface_thickness`
elements along the ray direction.
"""
def __init__(self, surface_thickness: int = 1):
"""
Args:
surface_thickness: Denotes the overlap between the absorption
function and the density function.
"""
super().__init__()
self.surface_thickness = surface_thickness
def forward(
self,
rays_densities: torch.Tensor,
rays_features: torch.Tensor,
eps: float = 1e-10,
**kwargs,
) -> torch.Tensor:
"""
Args:
rays_densities: Per-ray density values represented with a tensor
of shape `(..., n_points_per_ray, 1)` whose values range in [0, 1].
rays_features: Per-ray feature values represented with a tensor
of shape `(..., n_points_per_ray, feature_dim)`.
eps: A lower bound added to `rays_densities` before computing
the absorbtion function (cumprod of `1-rays_densities` along
each ray). This prevents the cumprod to yield exact 0
which would inhibit any gradient-based learning.
Returns:
features_opacities: A tensor of shape `(..., feature_dim+1)`
that concatenates two tensors alonng the last dimension:
1) features: A tensor of per-ray renders
of shape `(..., feature_dim)`.
2) opacities: A tensor of per-ray opacity values
of shape `(..., 1)`. Its values range between [0, 1] and
denote the total amount of light that has been absorbed
for each ray. E.g. a value of 0 corresponds to the ray
completely passing through a volume. Please refer to the
`AbsorptionOnlyRaymarcher` documentation for the
explanation of the algorithm that computes `opacities`.
"""
_check_raymarcher_inputs(
rays_densities,
rays_features,
None,
z_can_be_none=True,
features_can_be_none=False,
density_1d=True,
)
_check_density_bounds(rays_densities)
rays_densities = rays_densities[..., 0]
absorption = _shifted_cumprod(
(1.0 + eps) - rays_densities, shift=self.surface_thickness
)
weights = rays_densities * absorption
features = (weights[..., None] * rays_features).sum(dim=-2)
opacities = 1.0 - torch.prod(1.0 - rays_densities, dim=-1, keepdim=True)
return torch.cat((features, opacities), dim=-1)
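# Minimal usage sketch (tensor shapes are assumed, not part of the module):
#     raymarcher = EmissionAbsorptionRaymarcher()
#     rays_densities = torch.rand(2, 128, 64, 1)  # (batch, rays, points, 1)
#     rays_features = torch.rand(2, 128, 64, 3)   # (batch, rays, points, rgb)
#     out = raymarcher(rays_densities, rays_features)  # shape (2, 128, 3 + 1)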
class AbsorptionOnlyRaymarcher(torch.nn.Module):
"""
Raymarch using the Absorption-Only (AO) algorithm.
The algorithm independently renders each ray by analyzing density and
feature values sampled at (typically uniformly) spaced 3D locations along
each ray. The density values `rays_densities` are of shape
`(..., n_points_per_ray, 1)`, their values should range between [0, 1], and
represent the opaqueness of each point (the higher the less transparent).
The algorithm only measures the total amount of light absorbed along each ray
and, besides outputting per-ray `opacity` values of shape `(...,)`,
does not produce any feature renderings.
The algorithm simply computes `total_transmission = prod(1 - rays_densities)`
of shape `(..., 1)` which, for each ray, measures the total amount of light
that passed through the volume.
It then returns `opacities = 1 - total_transmission`.
"""
def __init__(self):
super().__init__()
def forward(
self, rays_densities: torch.Tensor, **kwargs
) -> Union[None, torch.Tensor]:
"""
Args:
rays_densities: Per-ray density values represented with a tensor
of shape `(..., n_points_per_ray)` whose values range in [0, 1].
Returns:
opacities: A tensor of per-ray opacity values of shape `(..., 1)`.
Its values range between [0, 1] and denote the total amount
of light that has been absorbed for each ray. E.g. a value
of 0 corresponds to the ray completely passing through a volume.
"""
_check_raymarcher_inputs(
rays_densities,
None,
None,
features_can_be_none=True,
z_can_be_none=True,
density_1d=True,
)
rays_densities = rays_densities[..., 0]
_check_density_bounds(rays_densities)
total_transmission = torch.prod(1 - rays_densities, dim=-1, keepdim=True)
opacities = 1.0 - total_transmission
# pyre-fixme[7]: Expected `Optional[torch.Tensor]` but got `float`.
return opacities
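# Worked example (values assumed): for a single ray with densities [0.3, 0.5],
# total_transmission = (1 - 0.3) * (1 - 0.5) = 0.35, so the returned opacity
# is 1 - 0.35 = 0.65.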
def _shifted_cumprod(x, shift=1):
"""
Computes `torch.cumprod(x, dim=-1)` and prepends `shift` number of
ones and removes `shift` trailing elements to/from the last dimension
of the result.
"""
x_cumprod = torch.cumprod(x, dim=-1)
x_cumprod_shift = torch.cat(
[torch.ones_like(x_cumprod[..., :shift]), x_cumprod[..., :-shift]], dim=-1
)
return x_cumprod_shift
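# Illustrative example (values assumed): for x = [0.9, 0.8, 0.7] and shift=1,
# torch.cumprod(x, dim=-1) is [0.9, 0.72, 0.504]; prepending one 1.0 and
# dropping the last element gives [1.0, 0.9, 0.72].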
def _check_density_bounds(
rays_densities: torch.Tensor, bounds: Tuple[float, float] = (0.0, 1.0)
):
"""
Checks whether the elements of `rays_densities` range within `bounds`.
If not issues a warning.
"""
# pyre-fixme[16]: `ByteTensor` has no attribute `any`.
if ((rays_densities > bounds[1]) | (rays_densities < bounds[0])).any():
warnings.warn(
"One or more elements of rays_densities are outside of valid"
+ f"range {str(bounds)}"
)
def _check_raymarcher_inputs(
rays_densities: torch.Tensor,
rays_features: Optional[torch.Tensor],
rays_z: Optional[torch.Tensor],
features_can_be_none: bool = False,
z_can_be_none: bool = False,
density_1d: bool = True,
):
"""
Checks the validity of the inputs to raymarching algorithms.
"""
if not torch.is_tensor(rays_densities):
raise ValueError("rays_densities has to be an instance of torch.Tensor.")
if not z_can_be_none and not torch.is_tensor(rays_z):
raise ValueError("rays_z has to be an instance of torch.Tensor.")
if not features_can_be_none and not torch.is_tensor(rays_features):
raise ValueError("rays_features has to be an instance of torch.Tensor.")
if rays_densities.ndim < 1:
raise ValueError("rays_densities have to have at least one dimension.")
if density_1d and rays_densities.shape[-1] != 1:
raise ValueError(
"The size of the last dimension of rays_densities has to be one."
)
rays_shape = rays_densities.shape[:-1]
# pyre-fixme[16]: `Optional` has no attribute `shape`.
if not z_can_be_none and rays_z.shape != rays_shape:
raise ValueError("rays_z have to be of the same shape as rays_densities.")
if not features_can_be_none and rays_features.shape[:-1] != rays_shape:
raise ValueError(
"The first to previous to last dimensions of rays_features"
" have to be the same as all dimensions of rays_densities."
)
| 40.625551
| 84
| 0.642919
|
9d522d3826952dfaa454c0e8e187eb6a57c0be2e
| 3,562
|
py
|
Python
|
weasyprint/tests/test_tools.py
|
szmenpro/WeasyPrint
|
80c5a8287133259f5deff4442adf8dc64dd4f65c
|
[
"BSD-3-Clause"
] | 2
|
2019-12-06T15:40:14.000Z
|
2020-07-29T21:30:35.000Z
|
weasyprint/tests/test_tools.py
|
szmenpro/WeasyPrint
|
80c5a8287133259f5deff4442adf8dc64dd4f65c
|
[
"BSD-3-Clause"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
weasyprint/tests/test_tools.py
|
szmenpro/WeasyPrint
|
80c5a8287133259f5deff4442adf8dc64dd4f65c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
weasyprint.tests.test_tools
---------------------------
Test WeasyPrint Web tools.
:copyright: Copyright 2011-2019 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import io
from urllib.parse import urlencode
import cairocffi as cairo
from ..tools import navigator, renderer
from ..urls import path2url
from .testing_utils import assert_no_logs
def wsgi_client(module, path_info, qs_args=None, method='GET'):
start_response_calls = []
def start_response(status, headers):
start_response_calls.append((status, headers))
environ = {'REQUEST_METHOD': method, 'PATH_INFO': path_info}
qs = urlencode(qs_args or {})
if method == 'POST':
environ['wsgi.input'] = io.BytesIO(qs.encode('utf-8'))
environ['CONTENT_LENGTH'] = len(qs.encode('utf-8'))
else:
environ['QUERY_STRING'] = qs
response = b''.join(module.app(environ, start_response))
assert len(start_response_calls) == 1
status, headers = start_response_calls[0]
return status, dict(headers), response
@assert_no_logs
def test_navigator(tmpdir):
status, headers, body = wsgi_client(navigator, '/lipsum')
assert status == '404 Not Found'
status, headers, body = wsgi_client(navigator, '/')
body = body.decode('utf8')
assert status == '200 OK'
assert headers['Content-Type'].startswith('text/html;')
assert '<title>WeasyPrint Navigator</title>' in body
assert '<img' not in body
assert '></a>' not in body
test_file = tmpdir.join('test.html')
test_file.write(b'''
<h1 id=foo><a href="http://weasyprint.org">Lorem ipsum</a></h1>
<h2><a href="#foo">bar</a></h2>
''')
url = path2url(test_file.strpath)
for status, headers, body in [
wsgi_client(navigator, '/view/' + url),
wsgi_client(navigator, '/', {'url': url}),
]:
body = body.decode('utf8')
assert status == '200 OK'
assert headers['Content-Type'].startswith('text/html;')
assert '<title>WeasyPrint Navigator</title>' in body
assert '<img src="data:image/png;base64,' in body
assert ' name="foo"></a>' in body
assert ' href="#foo"></a>' in body
assert ' href="/view/http://weasyprint.org"></a>' in body
status, headers, body = wsgi_client(navigator, '/pdf/' + url)
assert status == '200 OK'
assert headers['Content-Type'] == 'application/pdf'
assert body.startswith(b'%PDF')
if cairo.cairo_version() >= 11504:
assert b'/URI (http://weasyprint.org)' in body
assert b'/Title (Lorem ipsum)' in body
@assert_no_logs
def test_renderer():
status, headers, body = wsgi_client(renderer, '/lipsum')
assert status == '404 Not Found'
status, headers, body_1 = wsgi_client(renderer, '/')
assert b'data:image/png;base64,iVBO' in body_1
status, headers, body_2 = wsgi_client(
renderer, '/', {'content': renderer.DEFAULT_CONTENT}, method='POST')
assert body_1 == body_2
status, headers, body_3 = wsgi_client(
renderer, '/', {'content': 'abc'}, method='POST')
assert b'data:image/png;base64,iVBO' in body_3
assert body_1 != body_3
status, headers, body_4 = wsgi_client(
renderer, '/render', {'content': 'abc'}, method='POST')
assert body_4.startswith(b'iVBO')
assert body_4 in body_3
status, headers, body_5 = wsgi_client(
renderer, '/render', {'content': 'def'}, method='POST')
assert body_5.startswith(b'iVBO')
assert body_5 not in body_3
| 32.981481
| 78
| 0.639528
|
a41f91146ed0db1ea94e74c388f087b5fc3940f9
| 3,870
|
py
|
Python
|
pGUID_InvCodeMatchingMulti.py
|
kleblanc5909/FuzzyMatchIDs
|
d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4
|
[
"MIT"
] | null | null | null |
pGUID_InvCodeMatchingMulti.py
|
kleblanc5909/FuzzyMatchIDs
|
d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4
|
[
"MIT"
] | 1
|
2019-03-28T18:14:31.000Z
|
2019-08-09T14:28:28.000Z
|
pGUID_InvCodeMatchingMulti.py
|
kleblanc5909/FuzzyMatchIDs
|
d1c5ef37ea19f08cc30193285c9d98de0ae3d5c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: Kim LeBlanc
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
"""
Loops over a list and returns fuzzy matches found in a second list.
Inputs:
list1 - list of terms to search for in the master list
list2 - master list that is searched for matches over and over
"""
TopMatches = []
TopScores = []
TopRowIdxs = []
for member in list1:
        x = process.extract(member, list2)
        print(x)
        # Materialize the matches as lists; appending bare generator
        # expressions would store lazy generator objects instead of values.
        TopMatches.append([i[0] for i in x])
        TopScores.append([i[1] for i in x])
        TopRowIdxs.append([i[2] for i in x])
return TopMatches, TopScores, TopRowIdxs
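# Note (hedged; exact behaviour depends on the fuzzywuzzy version): when list2
# is a pandas Series, process.extract() yields (matched value, score, index)
# triples such as ('ABC123', 90, 17) (hypothetical values), which is why
# i[0], i[1] and i[2] are collected above.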
def createRUID_List(rowIdxList, headerStr):
"""
Loops over a series containing row indices and returns a list of RUID strings.
Inputs:
rowIdxList - collection of row index values
headerStr - DataFrame header string value for column containing RUIDs
Outputs:
new list containing RUID strings
"""
RUID_List = []
for aRowIdx in rowIdxList:
workingRUID=df[headerStr].iloc[aRowIdx]
RUID_List.append(workingRUID)
return RUID_List
df = pd.read_excel("abcd_rucdr_master_forPython.xlsx")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
# df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
Unique_DAIC_Invs = df['InvCodeDAIC_OnlyTxt'].dropna()
Unique_Rutgers_Invs = df['InvCodeRUCDR_OnlyTxt'].dropna()
AllRutgersInvs = df['InvCodeMinusDOTxt'].dropna()
AllDAIC_Invs = df['InvCodeMinusROTxt'].dropna()
print ('About to start first match2collections.')
BestMatch_DtoR, BestScore_DtoR, BestRowIdx_DtoR = match2Lists(Unique_DAIC_Invs,AllRutgersInvs)
print ('Just finished first match2collections.')
print ('About to start second match2collections.')
BestMatch_RtoD, BestScore_RtoD, BestRowIdx_RtoD = match2Lists(Unique_Rutgers_Invs, AllDAIC_Invs)
print ('Just finished second match2collections.')
#print ('About to start third match2collections.')
#BestMatch_DtoD, BestScore_DtoD, BestRowIdx_DtoD = match2Lists(Unique_DAIC_IDs, AllDAIC_IDs)
#print ('Just finished third match2collections.')
#print ('About to start fourth match2collections.')
#BestMatch_RtoR, BestScore_RtoR, BestRowIdx_RtoR = match2Lists(Unique_Rutgers_IDs, AllRutgersIDs)
#print ('Just finished fourth match2collections.')
df['BestMatchdf_DtoR']=pd.Series(BestMatch_DtoR)
df['BestScoredf_DtoR']=pd.Series(BestScore_DtoR)
df['BestMatchdf_RtoD']=pd.Series(BestMatch_RtoD)
df['BestScoredf_RtoD']=pd.Series(BestScore_RtoD)
df['BestRowIdxdf_DtoR']=pd.Series(BestRowIdx_DtoR)
df['BestRowIdxdf_RtoD']=pd.Series(BestRowIdx_RtoD)
#df['BestMatchdf_DtoD']=pd.Series(BestMatch_DtoD)
#df['BestScoredf_DtoD']=pd.Series(BestScore_DtoD)
#df['BestMatchdf_RtoR']=pd.Series(BestMatch_RtoR)
#df['BestScoredf_RtoR']=pd.Series(BestScore_RtoR)
#df['BestRowIdxdf_DtoD']=pd.Series(BestRowIdx_DtoD)
#df['BestRowIdxdf_RtoR']=pd.Series(BestRowIdx_RtoR)
pGUID_DtoR_List = createRUID_List(BestRowIdx_DtoR, 'rucdr.SUBCODE')
df['pGUID_DtoR']=pd.Series(pGUID_DtoR_List)
pGUID_RtoD_List = createRUID_List(BestRowIdx_RtoD, 'abcd.id_redcap')
df['pGUID_RtoD']=pd.Series(pGUID_RtoD_List)
#RUID_DtoD_List = createRUID_List(BestRowIdx_DtoD, 'RUID_DAIC')
#df['RUID_DtoD']=pd.Series(RUID_DtoD_List)
#KCode_DtoD_List = createRUID_List(BestRowIdx_DtoD, 'Kim_code')
#df['KCode_DtoD']=pd.Series(KCode_DtoD_List)
#RUID_RtoR_List = createRUID_List(BestRowIdx_RtoR, 'RUID_Rutgers')
#df['RUID_RtoR']=pd.Series(RUID_RtoR_List)
writer = pd.ExcelWriter('FuzzyMatchedInvsOne.xlsx')
df.to_excel(writer,'Sheet1')
writer.save()
| 37.572816
| 98
| 0.734625
|
fbb301af2890d1f10a3ef942308bc2178974740f
| 294
|
py
|
Python
|
maths/first_digit_by_str.py
|
examplehub/Python
|
cd9dd9feb6dd65c2e0d545b5154a0f684e5c1e17
|
[
"Apache-2.0"
] | 9
|
2020-10-03T06:52:35.000Z
|
2021-09-08T03:22:59.000Z
|
maths/first_digit_by_str.py
|
Shubhanshu156/Python-1
|
3bd617843dfae1e9f4855a68a0f9663ea9180da5
|
[
"Apache-2.0"
] | 8
|
2020-10-03T06:52:37.000Z
|
2021-09-25T08:11:53.000Z
|
maths/first_digit_by_str.py
|
Shubhanshu156/Python-1
|
3bd617843dfae1e9f4855a68a0f9663ea9180da5
|
[
"Apache-2.0"
] | 3
|
2020-10-25T08:25:50.000Z
|
2021-09-05T11:16:17.000Z
|
def first_digit(number: int) -> int:
"""
>>> first_digit(-123)
1
>>> first_digit(0)
0
>>> first_digit(123)
1
>>> first_digit(123456789)
1
"""
return int(str(abs(number))[0])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 15.473684
| 36
| 0.547619
|
0162bf31fef1dbf742ce8746a194ca388b9cc3d3
| 1,746
|
py
|
Python
|
models/contact.py
|
ksemish/KseniyaRepository
|
80f476c12c5d5412eed31f243fe4de84982eee46
|
[
"Apache-2.0"
] | null | null | null |
models/contact.py
|
ksemish/KseniyaRepository
|
80f476c12c5d5412eed31f243fe4de84982eee46
|
[
"Apache-2.0"
] | null | null | null |
models/contact.py
|
ksemish/KseniyaRepository
|
80f476c12c5d5412eed31f243fe4de84982eee46
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Contacts:
def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None, company=None,
address=None, home=None, mobile=None, work=None, fax=None, email=None,
email2=None, email3=None, homeaddress=None, homephone=None, notes=None, id =None,
all_phones_from_home_page=None, all_emails_from_home_page=None, group_id=None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.company = company
self.address = address
self.home = home
self.mobile = mobile
self.work = work
self.fax = fax
self.email = email
self.email2 = email2
self.email3 = email3
self.homeaddress = homeaddress
self.homephone = homephone
self.notes = notes
self.id = id
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
self.group_id = group_id
def __repr__(self):
return "%s: %s: %s %s: %s: %s: %s: %s: %s: %s: %s: %s: %s: %s: %s: %s: %s:" \
% (self.id, self.lastname, self.firstname, self.middlename, self.nickname,
self.company, self.address, self.home, self.mobile, self.work, self.fax,
self.email, self.email2, self.email3, self.homeaddress, self.homephone, self.notes)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.lastname == other.lastname and self.firstname == other.firstname
def contact_id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 41.571429
| 147
| 0.639175
|
be976da1fb29514b2f5822ca5f41cbfc4eb2d2e3
| 125
|
py
|
Python
|
setup.py
|
bgyori/conso
|
fe612ada83c0f43c56b734faa17781baed7387a6
|
[
"CC-BY-4.0"
] | 5
|
2019-08-30T10:54:08.000Z
|
2021-09-20T17:16:12.000Z
|
setup.py
|
bgyori/conso
|
fe612ada83c0f43c56b734faa17781baed7387a6
|
[
"CC-BY-4.0"
] | 26
|
2018-09-21T13:19:25.000Z
|
2019-07-31T09:30:08.000Z
|
setup.py
|
bgyori/conso
|
fe612ada83c0f43c56b734faa17781baed7387a6
|
[
"CC-BY-4.0"
] | 3
|
2020-07-29T02:33:34.000Z
|
2021-03-10T20:49:56.000Z
|
# -*- coding: utf-8 -*-
"""Setup module for CONSO."""
import setuptools
if __name__ == '__main__':
setuptools.setup()
| 13.888889
| 29
| 0.624
|
3aba633a159adc000460c9050a4c25ae58c22dfb
| 345
|
py
|
Python
|
rspec/spec_commands/bundle.py
|
cwilbur/TestRSpec
|
e69209d5891667f90003b32c2190bda41b309663
|
[
"MIT"
] | null | null | null |
rspec/spec_commands/bundle.py
|
cwilbur/TestRSpec
|
e69209d5891667f90003b32c2190bda41b309663
|
[
"MIT"
] | null | null | null |
rspec/spec_commands/bundle.py
|
cwilbur/TestRSpec
|
e69209d5891667f90003b32c2190bda41b309663
|
[
"MIT"
] | null | null | null |
import os
class Bundle(object):
def __init__(self, context):
self.context = context
def result(self):
if not self.context.from_settings("check_for_bundler"):
return
if self.gemfile_exists():
return "bundle exec"
def gemfile_exists(self):
return self.context.gemfile_path()
| 21.5625
| 63
| 0.626087
|
a9727fffa20461af18c70f2a109098456ddc70d6
| 6,180
|
py
|
Python
|
tests/unit/testplan/test_plan_base.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/testplan/test_plan_base.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | 64
|
2019-04-15T20:56:40.000Z
|
2021-03-23T01:00:30.000Z
|
tests/unit/testplan/test_plan_base.py
|
dobragab/testplan
|
407ac1dfd33d19753e41235a1f576aeb06118840
|
[
"Apache-2.0"
] | null | null | null |
"""TODO."""
import os
import uuid
from testplan import Testplan, TestplanResult
from testplan.common.entity import (Resource, ResourceStatus,
Runnable, RunnableResult)
from testplan.common.utils.exceptions import should_raise
from testplan.common.utils.path import default_runpath
from testplan.common.utils.testing import (
argv_overridden, log_propagation_disabled)
from testplan.common.utils.logger import TESTPLAN_LOGGER
from testplan.report import TestGroupReport
from testplan.runnable import TestRunnerStatus, TestRunner
from testplan.runners.local import LocalRunner
class DummyDriver(Resource):
def starting(self):
self.status.change(ResourceStatus.STARTED)
def stopping(self):
self.status.change(ResourceStatus.STOPPED)
def aborting(self):
pass
class DummyTestResult(RunnableResult):
"""TODO."""
def __init__(self):
        super(DummyTestResult, self).__init__()
self.custom = None
self.report = TestGroupReport('test')
class DummyTest(Runnable):
RESULT = DummyTestResult
def __init__(self, name=None):
super(DummyTest, self).__init__()
self.name = name
self.resources.add(DummyDriver(), uid=self.name)
self.resources.add(DummyDriver())
def uid(self):
return self.name or super(DummyTest, self).uid()
def run_tests(self):
self._result.custom = '{}Result[{}]'.format(self.__class__.__name__,
self.name)
def main_batch_steps(self):
self._add_step(self.run_tests)
class MyPool(LocalRunner): # Start is async
def __init__(self, name=None):
super(MyPool, self).__init__()
self.name = name
def uid(self):
return self.name or super(MyPool, self).uid()
def _execute(self, uid):
func = self._input[uid]
test = func()
test.cfg.parent = func.parent_cfg
test.parent = func.parent
test.run()
self._results[uid] = test.result
assert isinstance(self._results[uid], DummyTestResult)
def aborting(self):
pass
def test_testplan():
"""TODO."""
from testplan.base import TestplanParser as MyParser
plan = Testplan(name='MyPlan', port=800,
parse_cmdline=False, parser=MyParser)
assert plan._cfg.name == 'MyPlan'
assert plan._cfg.port == 800
assert plan._cfg.runnable == TestRunner
assert plan.cfg.name == 'MyPlan'
assert plan._runnable.cfg.name == 'MyPlan'
# Argument of manager but not of runnable.
should_raise(AttributeError, getattr, args=(plan._runnable.cfg, 'port'))
assert isinstance(plan.status, TestRunnerStatus)
assert isinstance(plan._runnable.status, TestRunnerStatus)
assert 'local_runner' in plan.resources
assert isinstance(plan.add(DummyTest()), uuid.UUID)
assert plan.add(DummyTest(name='alice'), uid=123) == 123
assert plan.add(DummyTest(name='bob')) == 'bob'
assert 'pool' not in plan.resources
plan.add_resource(MyPool(name='pool'))
assert 'pool' in plan.resources
def task():
return DummyTest(name='tom')
assert isinstance(plan.add(task, resource='pool'), uuid.UUID)
assert isinstance(plan.add(task, resource='pool'), uuid.UUID)
assert len(plan.resources['local_runner']._input) == 3
for key in (123, 'bob'):
assert key in plan.resources['local_runner']._input
assert len(plan.resources['pool']._input) == 2
res = plan.run()
assert res.run is True
assert plan.resources['local_runner'].get(
'bob').custom == 'DummyTestResult[bob]'
assert plan.resources['local_runner'].get(
123).custom == 'DummyTestResult[alice]'
for key in plan.resources['pool']._input.keys():
assert plan.resources['pool'].get(
key).custom == 'DummyTestResult[tom]'
results = plan.result.test_results.values()
expected = ['DummyTestResult[None]', 'DummyTestResult[alice]',
'DummyTestResult[tom]', 'DummyTestResult[tom]',
'DummyTestResult[bob]']
for res in results:
should_raise(AttributeError, getattr, args=(res, 'decorated_value'))
assert res.run is True
assert res.custom in expected
expected.remove(res.custom)
assert len(expected) == 0
def test_testplan_decorator():
"""TODO."""
from testplan import test_plan
@test_plan(name='MyPlan', port=800, parse_cmdline=False)
def main1(plan):
plan.add(DummyTest(name='bob'))
return 123
res = main1() # pylint: disable=no-value-for-parameter
assert isinstance(res, TestplanResult)
assert res.decorated_value == 123
assert res.run is True
pdf_path = 'mypdf.pdf'
with argv_overridden('--pdf', pdf_path):
with log_propagation_disabled(TESTPLAN_LOGGER):
@test_plan(name='MyPlan', port=800)
def main2(plan, parser):
args = parser.parse_args()
assert args.verbose is False
assert args.pdf_path == pdf_path
assert plan.cfg.pdf_path == pdf_path
plan.add(DummyTest(name='bob'))
res = main2() # pylint:disable=assignment-from-no-return,no-value-for-parameter
assert isinstance(res, TestplanResult)
assert res.decorated_value is None
assert res.run is True
def test_testplan_runpath():
"""TODO."""
def runpath_maker(obj):
return '{sep}tmp{sep}custom'.format(sep=os.sep)
plan = Testplan(name='MyPlan', port=800, parse_cmdline=False)
assert plan.runpath is None
plan.run()
assert plan.runpath == default_runpath(plan._runnable)
path = '/var/tmp/user'
plan = Testplan(name='MyPlan', port=800, parse_cmdline=False,
runpath=path)
assert plan.runpath is None
plan.run()
assert plan.runpath == path
plan = Testplan(name='MyPlan', port=800, parse_cmdline=False,
runpath=runpath_maker)
assert plan.runpath is None
plan.run()
assert plan.runpath == runpath_maker(plan._runnable)
| 31.85567
| 92
| 0.648867
|
e7f758ff579884a8d5d164e6ac0b7aa121c6f5ec
| 7,487
|
py
|
Python
|
kentaroy47/predict.py
|
jamiejamiebobamie/Faster_R-CNN
|
9321feae5ee2a40e579784ce23381494afdf1d61
|
[
"MIT"
] | null | null | null |
kentaroy47/predict.py
|
jamiejamiebobamie/Faster_R-CNN
|
9321feae5ee2a40e579784ce23381494afdf1d61
|
[
"MIT"
] | 20
|
2020-03-09T20:31:06.000Z
|
2022-02-10T01:18:12.000Z
|
kentaroy47/predict.py
|
jamiejamiebobamie/Faster_R-CNN
|
9321feae5ee2a40e579784ce23381494afdf1d61
|
[
"MIT"
] | null | null | null |
from __future__ import division
import os
import cv2
import numpy as np
import pickle
import time
from kentaroy47.config import Config as config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import kentaroy47.roi_helpers as roi_helpers
import argparse
import os
import kentaroy47.resnet as nn
from kentaroy47.visualize import draw_boxes_and_label_on_image_cv2
# from: https://github.com/kentaroy47/frcnn-from-scratch-with-keras
def format_img_size(img, cfg):
""" formats the image size based on config """
img_min_side = float(cfg.im_size)
(height, width, _) = img.shape
if width <= height:
ratio = img_min_side / width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side / height
new_width = int(ratio * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
return img, ratio
def format_img_channels(img, cfg):
""" formats the image channels based on config """
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= cfg.img_channel_mean[0]
img[:, :, 1] -= cfg.img_channel_mean[1]
img[:, :, 2] -= cfg.img_channel_mean[2]
img /= cfg.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img
def format_img(img, C):
""" formats an image for model prediction based on config """
img, ratio = format_img_size(img, C)
img = format_img_channels(img, C)
return img, ratio
# Method to transform the coordinates of the bounding box to its original size
def get_real_coordinates(ratio, x1, y1, x2, y2):
real_x1 = int(round(x1 // ratio))
real_y1 = int(round(y1 // ratio))
real_x2 = int(round(x2 // ratio))
real_y2 = int(round(y2 // ratio))
return real_x1, real_y1, real_x2, real_y2
def predict_single_image(img_path, model_rpn, model_classifier_only, cfg, class_mapping):
st = time.time()
img = cv2.imread(img_path)
if img is None:
print('reading image failed.')
exit(0)
X, ratio = format_img(img, cfg)
if K.image_dim_ordering() == 'tf':
X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
# this is result contains all boxes, which is [x1, y1, x2, y2]
result = roi_helpers.rpn_to_roi(Y1, Y2, cfg, K.image_dim_ordering(), overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
result[:, 2] -= result[:, 0]
result[:, 3] -= result[:, 1]
bbox_threshold = 0.8
# apply the spatial pyramid pooling to the proposed regions
boxes = dict()
for jk in range(result.shape[0] // cfg.num_rois + 1):
rois = np.expand_dims(result[cfg.num_rois * jk:cfg.num_rois * (jk + 1), :], axis=0)
if rois.shape[1] == 0:
break
if jk == result.shape[0] // cfg.num_rois:
# pad R
curr_shape = rois.shape
target_shape = (curr_shape[0], cfg.num_rois, curr_shape[2])
rois_padded = np.zeros(target_shape).astype(rois.dtype)
rois_padded[:, :curr_shape[1], :] = rois
rois_padded[0, curr_shape[1]:, :] = rois[0, 0, :]
rois = rois_padded
[p_cls, p_regr] = model_classifier_only.predict([F, rois])
for ii in range(p_cls.shape[1]):
if np.max(p_cls[0, ii, :]) < bbox_threshold or np.argmax(p_cls[0, ii, :]) == (p_cls.shape[2] - 1):
continue
cls_num = np.argmax(p_cls[0, ii, :])
if cls_num not in boxes.keys():
boxes[cls_num] = []
(x, y, w, h) = rois[0, ii, :]
try:
(tx, ty, tw, th) = p_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= cfg.classifier_regr_std[0]
ty /= cfg.classifier_regr_std[1]
tw /= cfg.classifier_regr_std[2]
th /= cfg.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except Exception as e:
print(e)
pass
boxes[cls_num].append(
[cfg.rpn_stride * x, cfg.rpn_stride * y, cfg.rpn_stride * (x + w), cfg.rpn_stride * (y + h),
np.max(p_cls[0, ii, :])])
# add some nms to reduce many boxes
for cls_num, box in boxes.items():
boxes_nms = roi_helpers.non_max_suppression_fast(box, overlap_thresh=0.5)
boxes[cls_num] = boxes_nms
print(class_mapping[cls_num] + ":")
for b in boxes_nms:
b[0], b[1], b[2], b[3] = get_real_coordinates(ratio, b[0], b[1], b[2], b[3])
print('{} prob: {}'.format(b[0: 4], b[-1]))
img = draw_boxes_and_label_on_image_cv2(img, class_mapping, boxes)
print('Elapsed time = {}'.format(time.time() - st))
cv2.imshow('image', img)
result_path = './results_images/{}.png'.format(os.path.basename(img_path).split('.')[0])
print('result saved into ', result_path)
cv2.imwrite(result_path, img)
cv2.waitKey(0)
def predict(args_):
path = args_.path
with open('config.pickle', 'rb') as f_in:
cfg = pickle.load(f_in)
cfg.use_horizontal_flips = False
cfg.use_vertical_flips = False
cfg.rot_90 = False
class_mapping = cfg.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.items()}
input_shape_img = (None, None, 3)
input_shape_features = (None, None, 1024)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(cfg.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, cfg.num_rois, nb_classes=len(class_mapping),
trainable=True)
model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)
print('Loading weights from {}'.format(cfg.model_path))
model_rpn.load_weights(cfg.model_path, by_name=True)
model_classifier.load_weights(cfg.model_path, by_name=True)
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')
if os.path.isdir(path):
for idx, img_name in enumerate(sorted(os.listdir(path))):
if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
print(img_name)
predict_single_image(os.path.join(path, img_name), model_rpn,
model_classifier_only, cfg, class_mapping)
elif os.path.isfile(path):
print('predict image from {}'.format(path))
predict_single_image(path, model_rpn, model_classifier_only, cfg, class_mapping)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--path', '-p', default='images/000010.png', help='image path')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
predict(args)
| 37.064356
| 110
| 0.626419
|
1b51d422ea4c21832927f3baab2ce41a31ba79da
| 1,491
|
py
|
Python
|
azure-mgmt-servicebus/azure/mgmt/servicebus/models/regenerate_access_key_parameters_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-servicebus/azure/mgmt/servicebus/models/regenerate_access_key_parameters_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-servicebus/azure/mgmt/servicebus/models/regenerate_access_key_parameters_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegenerateAccessKeyParameters(Model):
"""Parameters supplied to the Regenerate Authorization Rule operation,
    specifies which key needs to be reset.
All required parameters must be populated in order to send to Azure.
:param key_type: Required. The access key to regenerate. Possible values
include: 'PrimaryKey', 'SecondaryKey'
:type key_type: str or ~azure.mgmt.servicebus.models.KeyType
    :param key: Optional. If a key value is provided, it is set for the given
    key type; otherwise the service auto-generates a new key value.
:type key: str
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'KeyType'},
'key': {'key': 'key', 'type': 'str'},
}
def __init__(self, *, key_type, key: str=None, **kwargs) -> None:
super(RegenerateAccessKeyParameters, self).__init__(**kwargs)
self.key_type = key_type
self.key = key
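A short usage sketch for the generated model above; the key values are made up, while the keyword arguments come straight from __init__ and the docstring.
# Regenerate the primary key of a Service Bus authorization rule.
params = RegenerateAccessKeyParameters(key_type='PrimaryKey')
# Optionally supply the new key value yourself instead of letting the service
# auto-generate one.
params_with_key = RegenerateAccessKeyParameters(key_type='SecondaryKey', key='my-preset-key-value')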
| 35.5
| 76
| 0.618377
|
5eddcc152a5eb9123d57f37141a65193ce0d24a3
| 2,284
|
py
|
Python
|
P3-Logs-Analysis/newsreport.py
|
nehal96/full-stack-nd
|
a0721513538fbd7960e5a6408b8609816f81fc9e
|
[
"MIT"
] | null | null | null |
P3-Logs-Analysis/newsreport.py
|
nehal96/full-stack-nd
|
a0721513538fbd7960e5a6408b8609816f81fc9e
|
[
"MIT"
] | null | null | null |
P3-Logs-Analysis/newsreport.py
|
nehal96/full-stack-nd
|
a0721513538fbd7960e5a6408b8609816f81fc9e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import psycopg2
print("NEWS REPORT \n\n")
DBNAME = "news"
conn = psycopg2.connect(dbname=DBNAME) # Connect to database
cur = conn.cursor() # Create cursor object
# SQL statement for Question 1
cur.execute("""SELECT articles.title as "Name of article", viewcount.views as
"Views"
FROM articles,
(SELECT path, count(*) as views
FROM log
GROUP BY path
ORDER BY views DESC)
as viewcount
WHERE viewcount.path = '/article/' || articles.slug
LIMIT 3;""")
# Print question and answer
print("Question 1: What are the three most popular articles of all time? \n")
result = cur.fetchall()
for i, (title, views) in enumerate(result):
print("{0}. {1}: {2} views".format(i+1, title, views))
print("\n\n")
# SQL statement for Question 2
cur.execute("""SELECT authors.name as "Author Name", SUM(viewcount.views) as
"Total Views"
FROM authors JOIN articles
ON authors.id = articles.author
JOIN (SELECT path, count(*) as views
FROM log
GROUP BY path
ORDER BY views DESC) as viewcount
ON viewcount.path = '/article/' || articles.slug
GROUP BY authors.name
ORDER BY "Total Views" DESC;""")
# Print question and answer
print("Question 2: Who are the most popular article authors of all time? \n")
result = cur.fetchall()
for i, (author, views) in enumerate(result):
print("{0}. {1}: {2} views".format(i+1, author, views))
print("\n\n")
# SQL statement for Question 3
# See README for the 3 views created and used in this SQL statement:
# successful, failed, and percent_failed
cur.execute("""SELECT to_char(date, 'FMMonth DD, YYYY'), percent
FROM percent_failed
WHERE percent > 1;""")
# Print question and answer
print("Question 3: On which days did more than 1% of the requests lead "
"to errors? \n")
result = cur.fetchall()
for i, (date, percent) in enumerate(result):
print("{0}. {1}: {2:.2f}%".format(i+1, date, percent))
print("\n\n")
# Close connection to database
conn.close()
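The Question 3 query relies on views that are defined in the project README rather than in this script. The block below is an assumed reconstruction of what they might look like (column names taken from the standard "news" log table), included only to make the query above readable; it is not the project's actual definition.
# Assumed reconstruction of the helper views (illustrative only):
#   CREATE VIEW successful AS
#       SELECT time::date AS day, count(*) AS hits
#       FROM log WHERE status = '200 OK' GROUP BY day;
#   CREATE VIEW failed AS
#       SELECT time::date AS day, count(*) AS errors
#       FROM log WHERE status != '200 OK' GROUP BY day;
#   CREATE VIEW percent_failed AS
#       SELECT successful.day AS date,
#              100.0 * failed.errors / (successful.hits + failed.errors) AS percent
#       FROM successful JOIN failed ON successful.day = failed.day;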
| 31.722222
| 77
| 0.594133
|
c0458bc6001df3132c272d0c00791ee55fcdf150
| 18,151
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_02_01/operations/_express_route_circuit_connections_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_02_01/operations/_express_route_circuit_connections_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_02_01/operations/_express_route_circuit_connections_operations.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations(object):
"""ExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteCircuitConnection"
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteCircuitConnection"
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitConnection"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
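The class docstring above says these operations are reached through a client attribute rather than by instantiating the class directly. The sketch below shows that pattern; the NetworkManagementClient / DefaultAzureCredential names and the express_route_circuit_connections attribute are assumptions about the surrounding SDK, while the method name and parameters come from the code above.
# Illustrative only -- client construction details are assumptions.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
client = NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>')
# begin_delete returns an LROPoller; result() blocks until the long-running
# operation reaches a terminal (200/202/204) response.
poller = client.express_route_circuit_connections.begin_delete(
    resource_group_name='my-rg',
    circuit_name='my-circuit',
    peering_name='AzurePrivatePeering',
    connection_name='my-connection',
)
poller.result()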
| 49.865385
| 249
| 0.677043
|
7e6b155d0c6aa7629b9bc6dd68a094ffd647e56d
| 5,187
|
py
|
Python
|
tools_webrtc/autoroller/unittests/checkin_chromium_dep_test.py
|
lianhuaren/webrtc
|
096f18c11d8acb0d92820f75fdf934607f424cfc
|
[
"DOC",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools_webrtc/autoroller/unittests/checkin_chromium_dep_test.py
|
lianhuaren/webrtc
|
096f18c11d8acb0d92820f75fdf934607f424cfc
|
[
"DOC",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools_webrtc/autoroller/unittests/checkin_chromium_dep_test.py
|
lianhuaren/webrtc
|
096f18c11d8acb0d92820f75fdf934607f424cfc
|
[
"DOC",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import errno
import json
import os.path
import shutil
import stat
import sys
import tempfile
import unittest
import distutils.dir_util
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir)
sys.path.append(PARENT_DIR)
from checkin_chromium_dep import Config, CheckinDependency, RunCommand, \
DependencyNotFound
CHECKOUT_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, os.pardir,
os.pardir))
FAKE_REMOTE_TEMPLATE_ROOT = os.path.join(SCRIPT_DIR, 'testdata',
'checkin_chromium_dep', 'remote_root')
FAKE_SOURCE_TEMPLATE_ROOT = os.path.join(SCRIPT_DIR, 'testdata',
'checkin_chromium_dep', 'src_root')
def _HandleRemoveReadonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777
func(path)
else:
raise excvalue
class TestCheckInChromiumDep(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.mkdtemp(prefix='webrtc_test_')
self._fake_chromium_repo = tempfile.mkdtemp(prefix='webrtc_test_')
self._fake_source_repo = tempfile.mkdtemp(prefix='webrtc_test_')
print("Temp dir: %s\n"
"Chromium third_party fake repo: %s\n"
"WebRTC source fake repo: %s" % (
self._temp_dir, self._fake_chromium_repo,
self._fake_source_repo))
self._fake_chromium_revision = TestCheckInChromiumDep._InitFakeChromiumRepo(
self._fake_chromium_repo)
TestCheckInChromiumDep._InitFakeSourceRepo(self._fake_source_repo,
self._fake_chromium_revision)
@staticmethod
def _InitFakeChromiumRepo(repo_dir):
RunCommand(['git', 'init'], working_dir=repo_dir)
distutils.dir_util.copy_tree(FAKE_REMOTE_TEMPLATE_ROOT, repo_dir)
RunCommand(['git', 'add', '-A', '.'], working_dir=repo_dir)
RunCommand(['git', 'commit', '-m', 'Init'],
working_dir=repo_dir)
stdout, _ = RunCommand(['git', 'rev-parse', 'HEAD'], working_dir=repo_dir)
return stdout.strip()
@staticmethod
def _InitFakeSourceRepo(repo_dir, chromium_third_party_revision):
RunCommand(['git', 'init'], working_dir=repo_dir)
# Copy repo template
distutils.dir_util.copy_tree(FAKE_SOURCE_TEMPLATE_ROOT, repo_dir)
# Set right chromium third_party revision in DEPS file
with open(os.path.join(repo_dir, 'DEPS'), 'rb') as f:
deps_content = f.read()
deps_content = deps_content % chromium_third_party_revision
with open(os.path.join(repo_dir, 'DEPS'), 'wb') as f:
f.write(deps_content)
# Commit all repo content
RunCommand(['git', 'add', '-A', '.'], working_dir=repo_dir)
RunCommand(['git', 'commit', '-m', 'Init'],
working_dir=repo_dir)
def tearDown(self):
shutil.rmtree(self._temp_dir, ignore_errors=False,
onerror=_HandleRemoveReadonly)
shutil.rmtree(self._fake_chromium_repo, ignore_errors=False,
onerror=_HandleRemoveReadonly)
shutil.rmtree(self._fake_source_repo, ignore_errors=False,
onerror=_HandleRemoveReadonly)
def testCheckIn(self):
third_party_dir = os.path.join(self._fake_source_repo, 'third_party')
CheckinDependency('dep_bar',
Config(
self._fake_source_repo,
'file://%s' % self._fake_chromium_repo,
self._temp_dir))
third_party_deps_list_file = os.path.join(self._fake_source_repo,
'THIRD_PARTY_CHROMIUM_DEPS.json')
with open(third_party_deps_list_file, 'rb') as f:
deps_list = json.load(f).get('dependencies', [])
# New dependency appended to deps list file
self.assertIn('dep_foo', deps_list)
self.assertIn('dep_bar', deps_list)
# Only new dependency was appended
self.assertNotIn('dep_buzz', deps_list)
# New dependency was copied into source tree
self.assertIn('dep_bar', os.listdir(third_party_dir))
self.assertIn(
'source_file.js', os.listdir(os.path.join(third_party_dir, 'dep_bar')))
# Only new dependency was copied into source tree
self.assertNotIn('dep_buzz', os.listdir(third_party_dir))
def testCheckInNotExistingDep(self):
self.assertRaises(DependencyNotFound,
CheckinDependency,
'dep_missing',
Config(self._fake_source_repo,
'file://%s' % self._fake_chromium_repo,
self._temp_dir))
if __name__ == '__main__':
unittest.main()
| 39.59542
| 80
| 0.665124
|
00fb75b7ee7776682f862894361e731926a847bc
| 3,733
|
py
|
Python
|
env/lib/python3.7/site-packages/docusign_rooms/models/api_error.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/docusign_rooms/models/api_error.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/docusign_rooms/models/api_error.py
|
davidgacc/docusign
|
e63167101656d0066d481844576ce687ea80eb91
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
DocuSign Rooms API - v2
An API for an integrator to access the features of DocuSign Rooms # noqa: E501
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error_code': 'str',
'message': 'str'
}
attribute_map = {
'error_code': 'errorCode',
'message': 'message'
}
def __init__(self, error_code=None, message=None): # noqa: E501
"""ApiError - a model defined in Swagger""" # noqa: E501
self._error_code = None
self._message = None
self.discriminator = None
if error_code is not None:
self.error_code = error_code
if message is not None:
self.message = message
@property
def error_code(self):
"""Gets the error_code of this ApiError. # noqa: E501
:return: The error_code of this ApiError. # noqa: E501
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ApiError.
:param error_code: The error_code of this ApiError. # noqa: E501
:type: str
"""
self._error_code = error_code
@property
def message(self):
"""Gets the message of this ApiError. # noqa: E501
:return: The message of this ApiError. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ApiError.
:param message: The message of this ApiError. # noqa: E501
:type: str
"""
self._message = message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiError, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiError):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
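A brief usage sketch for the model above; the values are made up, and the attribute and method names all come from the class itself.
# Build an error model and round-trip it through the helpers defined above.
err = ApiError(error_code='ROOM_NOT_FOUND', message='Room does not exist')
assert err.to_dict() == {'error_code': 'ROOM_NOT_FOUND', 'message': 'Room does not exist'}
assert err == ApiError(error_code='ROOM_NOT_FOUND', message='Room does not exist')
print(err)  # __repr__ delegates to to_str(), which pretty-prints to_dict()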
| 26.288732
| 83
| 0.556657
|
9bd9e8a19e3a00b266f3826dae2bb0e8d0ff58ad
| 9,629
|
py
|
Python
|
mmaction/models/recognizers/base.py
|
Cogito2012/DEAR
|
97d0e8f191da0f20dcc9721280af48171dabef5e
|
[
"Apache-2.0"
] | 47
|
2021-09-02T10:42:29.000Z
|
2022-03-31T01:37:49.000Z
|
mmaction/models/recognizers/base.py
|
Cogito2012/DEAR
|
97d0e8f191da0f20dcc9721280af48171dabef5e
|
[
"Apache-2.0"
] | 2
|
2021-12-05T02:28:42.000Z
|
2022-01-05T06:46:10.000Z
|
mmaction/models/recognizers/base.py
|
Cogito2012/DEAR
|
97d0e8f191da0f20dcc9721280af48171dabef5e
|
[
"Apache-2.0"
] | 6
|
2021-09-19T16:31:32.000Z
|
2022-03-03T06:57:34.000Z
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
"""Base class for recognizers.
All recognizers should subclass it.
    All subclasses should overwrite:
- Methods:``forward_train``, supporting to forward when training.
- Methods:``forward_test``, supporting to forward when testing.
Args:
backbone (dict): Backbone modules to extract feature.
cls_head (dict): Classification head to process feature.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head,
neck=None,
debias_head=None,
train_cfg=None,
test_cfg=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.cls_head = builder.build_head(cls_head)
if debias_head is not None:
self.debias_head = builder.build_head(debias_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# aux_info is the list of tensor names beyond 'imgs' and 'label' which
# will be used in train_step and val_step, data_batch should contain
# these tensors
self.aux_info = []
if train_cfg is not None and 'aux_info' in train_cfg:
self.aux_info = train_cfg['aux_info']
self.init_weights()
self.fp16_enabled = False
def init_weights(self):
"""Initialize the model network weights."""
self.backbone.init_weights()
self.cls_head.init_weights()
if hasattr(self, 'neck'):
self.neck.init_weights()
if hasattr(self, 'debias_head'):
self.debias_head.init_weights()
@auto_fp16()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
x = self.backbone(imgs)
return x
def evidence_to_prob(self, output, evidence_type):
if evidence_type == 'relu':
from ..losses.edl_loss import relu_evidence as evidence
elif evidence_type == 'exp':
from ..losses.edl_loss import exp_evidence as evidence
elif evidence_type == 'softplus':
from ..losses.edl_loss import softplus_evidence as evidence
alpha = evidence(output) + 1
S = torch.sum(alpha, dim=-1, keepdim=True)
prob = alpha / S
return prob
def average_clip(self, cls_score, num_segs=1):
"""Averaging class score over multiple clips.
        Uses the averaging type ('score', 'prob', 'evidence' or None, as
        defined in test_cfg) to compute the final averaged class score.
        Only called in test mode.
Args:
cls_score (torch.Tensor): Class score to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class score.
"""
if 'average_clips' not in self.test_cfg.keys():
            raise KeyError('"average_clips" must be defined in test_cfg\'s keys')
average_clips = self.test_cfg['average_clips']
if average_clips not in ['score', 'prob', 'evidence', None]:
raise ValueError(f'{average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", "evidence", None]')
if average_clips is None:
return cls_score
batch_size = cls_score.shape[0]
cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
if average_clips == 'prob':
cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
elif average_clips == 'score':
cls_score = cls_score.mean(dim=1)
elif average_clips == 'evidence':
assert 'evidence_type' in self.test_cfg.keys()
cls_score = self.evidence_to_prob(cls_score, self.test_cfg['evidence_type'])
cls_score = cls_score.mean(dim=1)
return cls_score
@abstractmethod
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
@abstractmethod
def forward_gradcam(self, imgs):
"""Defines the computation performed at every all when using gradcam
utils."""
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def forward(self, imgs, label=None, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if kwargs.get('gradcam', False):
del kwargs['gradcam']
return self.forward_gradcam(imgs, **kwargs)
if kwargs.get('get_feat', False):
del kwargs['get_feat']
return self.get_feat(imgs, **kwargs)
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(imgs, label, **kwargs)
return self.forward_test(imgs, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
assert item in data_batch
aux_info[item] = data_batch[item]
aux_info.update(kwargs)
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but with an evaluation hook.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
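A minimal sketch of what a concrete recognizer subclass has to provide, namely the three abstract hooks above. The reshape and the cls_head / cls_head.loss call signatures are assumptions about the rest of the codebase; only extract_feat, average_clip and the hook names come from this class.
class MinimalRecognizer(BaseRecognizer):
    """Illustrative subclass only; head call and loss signatures are assumptions."""
    def forward_train(self, imgs, labels, **kwargs):
        # imgs: (batch, num_clips, ...) -> fold the clip dimension into the batch.
        imgs = imgs.reshape((-1,) + imgs.shape[2:])
        cls_score = self.cls_head(self.extract_feat(imgs))
        return self.cls_head.loss(cls_score, labels.squeeze(), **kwargs)
    def forward_test(self, imgs):
        num_segs = imgs.shape[1]
        imgs = imgs.reshape((-1,) + imgs.shape[2:])
        cls_score = self.cls_head(self.extract_feat(imgs))
        return self.average_clip(cls_score, num_segs).cpu().numpy()
    def forward_gradcam(self, imgs):
        imgs = imgs.reshape((-1,) + imgs.shape[2:])
        return self.cls_head(self.extract_feat(imgs))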
| 35.929104
| 88
| 0.606293
|
980437f7fe2dbc92b23ca890da032738e632a5da
| 724
|
py
|
Python
|
pyglet-hg/tools/inspect_font.py
|
sangh/LaserShow
|
abc95e465e3455dc220cc602dd58358c84666f29
|
[
"BSD-3-Clause"
] | 21
|
2015-11-03T03:15:36.000Z
|
2021-03-15T22:00:47.000Z
|
tools/inspect_font.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | 3
|
2017-09-14T14:08:28.000Z
|
2019-05-20T04:38:15.000Z
|
tools/inspect_font.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | 23
|
2017-04-15T19:23:08.000Z
|
2020-09-08T11:55:29.000Z
|
#!/usr/bin/python
# $Id:$
'''Display font information.
Usage::
inspect_font.py <filename> [<filename> ...]
'''
import sys
from pyglet.font import ttf
def inspect_font(filename):
try:
info = ttf.TruetypeInfo(filename)
print '%s:' % filename,
print info.get_name('family'),
print 'bold=%r' % info.is_bold(),
print 'italic=%r' % info.is_italic(),
except:
print '''%s could not be identified. It is probably not a TrueType or
OpenType font. However, pyglet may still be able to load it
on some platforms.''' % filename
if __name__ == '__main__':
if len(sys.argv) < 2:
print __doc__
for filename in sys.argv[1:]:
inspect_font(filename)
| 23.354839
| 78
| 0.622928
|
95e363e3a8301896ae4bf8548a3537243903d18e
| 684
|
py
|
Python
|
biokinepy/vel_acc.py
|
klevis-a/biokinepy
|
33b3aba0a5281e323fc6e5f743c124ba4d22f96c
|
[
"MIT"
] | 1
|
2021-08-18T21:41:38.000Z
|
2021-08-18T21:41:38.000Z
|
biokinepy/vel_acc.py
|
klevis-a/biokinepy
|
33b3aba0a5281e323fc6e5f743c124ba4d22f96c
|
[
"MIT"
] | null | null | null |
biokinepy/vel_acc.py
|
klevis-a/biokinepy
|
33b3aba0a5281e323fc6e5f743c124ba4d22f96c
|
[
"MIT"
] | null | null | null |
import numpy as np
import quaternion
def ang_vel(mat_traj: np.ndarray, dt) -> np.ndarray:
"""Return the angular velocity of the rotation matrix trajectory (N, 3, 3)."""
mats_vel = np.gradient(mat_traj, dt, axis=0)
mats_t = np.swapaxes(mat_traj, -2, -1)
ang_vel_tensor = mats_vel @ mats_t
ang_vel_vector = np.stack((ang_vel_tensor[:, 2, 1], ang_vel_tensor[:, 0, 2], ang_vel_tensor[:, 1, 0]), -1)
return ang_vel_vector
def ang_vel_quat(quat_traj: np.ndarray, dt) -> np.ndarray:
"""Return the angular velocity of a quaternion trajectory (N, 4)."""
mats = quaternion.as_rotation_matrix(quaternion.as_quat_array(quat_traj))
return ang_vel(mats, dt)
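A quick numerical sanity check (illustrative, not part of the library): differentiating a constant-rate rotation about the z-axis should recover an angular velocity of [0, 0, omega].
if __name__ == '__main__':
    dt = 0.01
    omega = 2.0  # rad/s about the z-axis
    theta = omega * np.arange(0.0, 1.0, dt)
    c, s = np.cos(theta), np.sin(theta)
    z, o = np.zeros_like(theta), np.ones_like(theta)
    # Rotation matrices about z, stacked into shape (N, 3, 3).
    rot = np.stack([np.stack([c, -s, z], axis=-1),
                    np.stack([s, c, z], axis=-1),
                    np.stack([z, z, o], axis=-1)], axis=-2)
    w = ang_vel(rot, dt)
    # Central differences in the interior recover the analytic value closely;
    # the two endpoint samples use one-sided differences.
    assert np.allclose(w[1:-1], [0.0, 0.0, omega], atol=1e-3)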
| 38
| 110
| 0.69883
|
e6340fc6efff3090dcbdf451c98f3e2580a8eb68
| 2,144
|
py
|
Python
|
viz_graph.py
|
ankurankan/graph_viz
|
ecf1237e9ae739bc06a8fafe8830d4ad21ab88a5
|
[
"MIT"
] | 1
|
2017-04-10T08:08:03.000Z
|
2017-04-10T08:08:03.000Z
|
viz_graph.py
|
ankurankan/graph_viz
|
ecf1237e9ae739bc06a8fafe8830d4ad21ab88a5
|
[
"MIT"
] | null | null | null |
viz_graph.py
|
ankurankan/graph_viz
|
ecf1237e9ae739bc06a8fafe8830d4ad21ab88a5
|
[
"MIT"
] | null | null | null |
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class viz_graph(nx.Graph):
def viz_neighbors(self, node, color='b'):
"""
Draw the graph using matplotlib with highlighted neighbors.
Parameters
----------
node: Networkx node
The node whose neighbors are to be highlighted.
color:
The color of neighbouring nodes.
If not specified the default color is blue
"""
neighbor_nodes = self.neighbors(node)
colors_list = [color if i in neighbor_nodes else 'r' for i in self.nodes()]
nx.draw_networkx(self, nx.spring_layout(self), node_color=colors_list)
def viz_bfs(self, node, color='b'):
"""
Animation for breadth first search
Parameters
----------
node: Networkx node
The node where to start the breadth first search from
color:
The color of the visited nodes.
If color not specified the default color is blue
"""
        def animati(i):
            plt.cla()  # clear the previous frame before redrawing
            nx.draw_circular(self, node_color=final_colors[i])
def bfs(s):
animate = []
Q = [s]
visited = [s]
animate.append(visited[::])
while Q:
node = Q.pop(0)
for v in self.neighbors(node):
if v not in visited:
visited.append(v)
Q.append(v)
animate.append(visited[::])
return (animate)
colors = ['r' for i in range(self.number_of_nodes())]
final_colors = []
animate = bfs(node)
for i in animate:
temp_colors = colors[::]
for j in i:
temp_colors[j-1] = 'b'
final_colors.append(temp_colors)
print(final_colors)
        fig = plt.figure()
        # Keep a reference to the animation so it is not garbage collected
        # before it runs; animati (defined above) draws frame i.
        anim = animation.FuncAnimation(fig, animati, frames=len(final_colors))
        plt.show()
        return anim
    def animate(self, i, color_list):
        """Draw frame ``i`` from a pre-computed list of per-frame colour lists."""
        print(color_list[i])
        nx.draw_circular(self, node_color=color_list[i])
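A small usage sketch for the class above; integer node labels starting at 1 are deliberate, because viz_bfs indexes its colour list with j - 1 and so assumes nodes are numbered 1..N.
# Illustrative usage: highlight the neighbours of node 1, then animate a BFS.
if __name__ == '__main__':
    g = viz_graph()
    g.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4), (4, 5), (5, 6)])
    g.viz_neighbors(1, color='g')
    plt.show()
    g.viz_bfs(1)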
| 31.072464
| 83
| 0.542444
|
6283fc8059609d25d9b9c3bde54e11001c3a9f39
| 3,064
|
py
|
Python
|
conf_testing/rules/habapp/test_habapp.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 44
|
2018-12-13T08:46:44.000Z
|
2022-03-07T03:23:21.000Z
|
conf_testing/rules/habapp/test_habapp.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 156
|
2019-03-02T20:53:31.000Z
|
2022-03-23T13:13:58.000Z
|
conf_testing/rules/habapp/test_habapp.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 18
|
2019-03-08T07:13:21.000Z
|
2022-03-22T19:52:31.000Z
|
import time
import HABApp
from HABApp.core.events import ItemNoUpdateEvent, ItemNoChangeEvent, ValueUpdateEvent
from HABApp.core.items import Item
from HABAppTests import TestBaseRule, EventWaiter, get_random_name
class TestItemEvents(TestBaseRule):
def __init__(self):
super().__init__()
self.add_test('Item const', self.item_events, changes=False, secs=2, values=['MyVal', 'MyVal', 'MyVal'])
self.add_test('Item change', self.item_events, changes=True, secs=2, values=['MyVal1', 'MyVal2', 'MyVal3'])
def check_event(self, event: ItemNoUpdateEvent):
assert event.name == self.watch_item.name, f'Wrong name: {event.name} != {self.watch_item.name}'
assert event.seconds == self.secs, f'Wrong seconds: {event.seconds} != {self.secs}'
dur = time.time() - self.ts_set - self.secs
assert abs(dur) < 0.05, f'Time wrong: {abs(dur):.2f}'
def item_events(self, changes=False, secs=5, values=[]):
item_name = get_random_name('HABApp')
self.watch_item = Item.get_create_item(item_name)
self.secs = secs
watcher = (self.watch_item.watch_change if changes else self.watch_item.watch_update)(secs)
event = ItemNoUpdateEvent if not changes else ItemNoChangeEvent
listener = self.listen_event(self.watch_item, self.check_event, event)
try:
self._run(values, event)
HABApp.core.Items.pop_item(item_name)
assert not HABApp.core.Items.item_exists(item_name)
time.sleep(0.5)
self.watch_item = Item.get_create_item(item_name)
self._run(values, event)
finally:
listener.cancel()
watcher.cancel()
return None
def _run(self, values, event):
self.ts_set = 0
for step, value in enumerate(values):
if step:
time.sleep(0.2)
self.ts_set = time.time()
self.watch_item.set_value(value)
with EventWaiter(self.watch_item.name, event, self.secs + 2) as w:
w.wait_for_event(seconds=self.secs)
TestItemEvents()
class TestItemListener(TestBaseRule):
def __init__(self):
super().__init__()
self.add_test('Item.listen_event', self.trigger_event)
def check_event(self, event: ValueUpdateEvent):
assert event.name == self.watch_item.name, f'Wrong name: {event.name} != {self.watch_item.name}'
assert event.value == 123, f'Wrong value: {event.value} != 123'
def set_up(self):
self.watch_item = Item.get_create_item(get_random_name('HABApp'))
self.listener = self.watch_item.listen_event(self.check_event, ValueUpdateEvent)
def tear_down(self):
self.listener.cancel()
def trigger_event(self):
self.run.at(
1, HABApp.core.EventBus.post_event, self.watch_item.name, ValueUpdateEvent(self.watch_item.name, 123)
)
with EventWaiter(self.watch_item.name, ValueUpdateEvent, 2) as w:
w.wait_for_event(value=123)
TestItemListener()
| 35.218391
| 115
| 0.655026
|
f822349f2f76c1a6990fb847019b3cbe07d92c86
| 728
|
py
|
Python
|
examples/pyfacebook_sample/urls.py
|
pose/pyfacebook-batch
|
387a1ea12e1b495a8f6c5ddaf01835d0add6c21f
|
[
"FSFAP"
] | 2
|
2016-05-09T14:18:53.000Z
|
2019-01-20T15:50:57.000Z
|
examples/pyfacebook_sample/urls.py
|
igorgue/pyfacebook
|
d06978fa930bcea33262c82f6a993bbb882fab12
|
[
"FSFAP"
] | null | null | null |
examples/pyfacebook_sample/urls.py
|
igorgue/pyfacebook
|
d06978fa930bcea33262c82f6a993bbb882fab12
|
[
"FSFAP"
] | null | null | null |
from django.conf.urls.defaults import *
# Hack to get the project name
project = __name__.split('.')[0]
# You'd want to change this to wherever your app lives
urlpatterns = patterns(project + '.pyfacebook_sample.views',
# Some functionality - users can post text to their homepage
(r'^canvas/post/', 'post'),
# For the mock AJAX functionality
(r'^canvas/ajax/', 'ajax'),
# This is the canvas callback, i.e. what will be seen
# when you visit http://apps.facebook.com/<appname>.
(r'^canvas/', 'canvas'),
# Extra callbacks can be set in the Facebook app settings
# page. For example, post_add will be called when a user
# has added the application.
(r'^post_add/', 'post_add'),
)
| 30.333333
| 64
| 0.671703
|
d3d2a083a33e33a43ee5f6cc9265a1de2a413585
| 1,364
|
py
|
Python
|
team13/svg thing/boundary.py
|
PyconUK/dojo18
|
c737f2d1028347dfaf60d0f1da4764c5582ced91
|
[
"MIT"
] | 3
|
2018-09-18T11:17:51.000Z
|
2018-09-21T19:31:04.000Z
|
team13/svg thing/boundary.py
|
PyconUK/dojo18
|
c737f2d1028347dfaf60d0f1da4764c5582ced91
|
[
"MIT"
] | 1
|
2018-09-17T20:10:43.000Z
|
2018-09-17T20:10:43.000Z
|
team13/svg thing/boundary.py
|
PyconUK/dojo18
|
c737f2d1028347dfaf60d0f1da4764c5582ced91
|
[
"MIT"
] | 13
|
2018-09-17T18:43:10.000Z
|
2018-09-17T20:21:01.000Z
|
points = [(568.2,200.2),
(560.7,187.2),
(392.3,271.1),
(348.8,195.8),
(348.8,195.8),
(345.9,190.8),
(303.3,117.1),
(284.1,128.2),
(312.9,184.8),
(232.1,242.5),
(241.1,258.1),
(325.4,209.3),
(367.8,292.4),
(360.2,483.2),
(429.8,483.2),
(415.3,301.1)]
sides = []
import random
def get_sides(points):
    for i in range(len(points)):
        if i != len(points) - 1:
            point1 = points[i]
            point2 = points[i + 1]
        else:
            point1 = points[i]
            point2 = points[0]
        if point1[0] == point2[0]:
            # y = m*x + c cannot represent vertical (or zero-length) sides; the
            # sample points above contain one duplicated vertex, so skip them.
            continue
        m = (point1[1] - point2[1]) / (point1[0] - point2[0])
        c = point1[1] - m * point1[0]
        sides.append({'m': m, "c": c, 1: point1, 2: point2})
def are_intersecting(side, ray):
    # Parallel lines (equal slopes) never intersect.
    if side['m'] == ray['m']:
        return False
    x = (ray['c'] - side['c']) / (side['m'] - ray['m'])
    y = side['m'] * x + side['c']
    return is_on_side(x, y, side)
def is_on_side(x, y, side):
    # The candidate intersection must fall inside the bounding box of the
    # side's endpoints; the coordinates are floats, so range() cannot be used.
    if not min(side[1][0], side[2][0]) <= x <= max(side[1][0], side[2][0]):
        return False
    if not min(side[1][1], side[2][1]) <= y <= max(side[1][1], side[2][1]):
        return False
    return True
def get_ray(x, y):
ray = {'m': 0, "c": y}
return ray
x = random.randint(0, 800)
y = random.randint(0, 600)
def check_in_shape(x, y, points):
    # Ray casting (assumed intent -- the original file ends at this def): cast a
    # horizontal ray to the right of (x, y) and count how many polygon sides it
    # crosses; an odd count means the point lies inside.
    if not sides:
        get_sides(points)
    ray = get_ray(x, y)
    crossings = 0
    for side in sides:
        if side['m'] == ray['m']:
            continue  # parallel to the ray (horizontal side): no crossing
        ix = (ray['c'] - side['c']) / (side['m'] - ray['m'])
        iy = side['m'] * ix + side['c']
        # Unlike are_intersecting(), only crossings to the right of the point count.
        if ix > x and is_on_side(ix, iy, side):
            crossings += 1
    return crossings % 2 == 1
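A tiny hypothetical check using the sample polygon and the random point generated above (not part of the original file).
if __name__ == '__main__':
    print('({}, {}) inside polygon: {}'.format(x, y, check_in_shape(x, y, points)))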
| 17.947368
| 55
| 0.538123
|