Dataset schema (29 columns; ⌀ marks a nullable column):

- hexsha: stringlengths 40–40
- size: int64, 3 – 1.03M
- ext: stringclasses, 10 values
- lang: stringclasses, 1 value
- max_stars_repo_path: stringlengths 3–972
- max_stars_repo_name: stringlengths 6–130
- max_stars_repo_head_hexsha: stringlengths 40–78
- max_stars_repo_licenses: listlengths 1–10
- max_stars_count: int64, 1 – 191k ⌀
- max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀
- max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
- max_issues_repo_path: stringlengths 3–972
- max_issues_repo_name: stringlengths 6–130
- max_issues_repo_head_hexsha: stringlengths 40–78
- max_issues_repo_licenses: listlengths 1–10
- max_issues_count: int64, 1 – 116k ⌀
- max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀
- max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
- max_forks_repo_path: stringlengths 3–972
- max_forks_repo_name: stringlengths 6–130
- max_forks_repo_head_hexsha: stringlengths 40–78
- max_forks_repo_licenses: listlengths 1–10
- max_forks_count: int64, 1 – 105k ⌀
- max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀
- max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
- content: stringlengths 3 – 1.03M
- avg_line_length: float64, 1.13 – 941k
- max_line_length: int64, 2 – 941k
- alphanum_fraction: float64, 0 – 1

Each row below gives its metadata fields, in this column order, on a single `|`-delimited line, followed by the file content and a closing `| avg_line_length | max_line_length | alphanum_fraction |` line.
| e2fa13878f2285c16f951786c847243a44a886a9 | 909 | py | Python | examples/waterbodies_c3.conflux.py | GeoscienceAustralia/dea-conflux | 1af77dda18190943e1368fef8deb4426662bb4a8 | ["Apache-2.0"] | 1 | 2021-10-06T08:52:51.000Z | 2021-10-06T08:52:51.000Z | examples/waterbodies_c3.conflux.py | GeoscienceAustralia/dea-conflux | 1af77dda18190943e1368fef8deb4426662bb4a8 | ["Apache-2.0"] | 14 | 2021-08-18T05:38:05.000Z | 2022-01-14T03:04:11.000Z | examples/waterbodies_c3.conflux.py | GeoscienceAustralia/dea-conflux | 1af77dda18190943e1368fef8deb4426662bb4a8 | ["Apache-2.0"] | 1 | 2021-11-05T02:40:45.000Z | 2021-11-05T02:40:45.000Z |
import xarray as xr
product_name = "waterbodies_c3"
version = "0.0.1"
resampling = "nearest"
output_crs = "EPSG:3577"
resolution = (-30, 30)
input_products = {
"ga_ls_wo_3": ["water"],
}
def transform(inputs: xr.Dataset) -> xr.Dataset:
# ignore sea, terrain/low solar angle
# by disabling those flags
wofl = inputs.water & 0b11110011
# then check for wet, dry
is_wet = wofl == 128
is_ok = is_wet | (wofl == 0)
masked_wet = is_wet.where(is_ok)
return xr.Dataset({"water": masked_wet})
def summarise(inputs: xr.Dataset) -> xr.Dataset:
pc_missing = inputs.water.isnull().mean()
px_wet = pc_wet = float("nan")
if pc_missing <= 0.1:
px_wet = inputs.water.sum()
pc_wet = px_wet / inputs.water.size
return xr.Dataset(
{
"px_wet": px_wet,
"pc_wet": pc_wet,
"pc_missing": pc_missing,
}
)
| 23.921053 | 48 | 0.606161 |
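As an illustrative aside on the `transform` function in the row above (not part of the dataset): its comments say that sea and terrain/low-solar-angle flags are dropped with `& 0b11110011`, after which 128 means wet and 0 means clear and dry. A minimal sketch of that masking on plain integers, with flag values assumed for the example:

```python
# Illustrative WOfS-style flag values (assumed for this sketch):
# wet = bit 7 (128), sea = bit 2 (4), terrain/low solar angle = bit 3 (8).
samples = [0b10000000, 0b10000100, 0b00001000, 0b00000001]

for wofl in samples:
    masked = wofl & 0b11110011        # drop the sea and terrain/low-solar-angle bits
    is_wet = masked == 128            # only the water bit remains set
    is_ok = is_wet or masked == 0     # wet, or a clear dry observation
    print(f"{wofl:08b} -> masked={masked:08b} wet={is_wet} usable={is_ok}")
```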
| 699828e9f1b4310d462a6e7b666335547b95224b | 7,028 | py | Python | maistra/vendor/com_googlesource_chromium_v8/wee8/build/fuchsia/run_package.py | maistra-bot/proxy | 05a551df62d90e96c24afc649f2755983d020b5b | ["Apache-2.0"] | null | null | null | maistra/vendor/com_googlesource_chromium_v8/wee8/build/fuchsia/run_package.py | maistra-bot/proxy | 05a551df62d90e96c24afc649f2755983d020b5b | ["Apache-2.0"] | null | null | null | maistra/vendor/com_googlesource_chromium_v8/wee8/build/fuchsia/run_package.py | maistra-bot/proxy | 05a551df62d90e96c24afc649f2755983d020b5b | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import time
import threading
import uuid
from symbolizer import SymbolizerFilter
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
"""Attaches a kernel log reader as a long-running SSH task."""
logging.info('Attaching kernel logger.')
return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE)
class MergedInputStream(object):
"""Merges a number of input streams into a UNIX pipe on a dedicated thread.
Terminates when the file descriptor of the primary stream (the first in
the sequence) is closed."""
def __init__(self, streams):
assert len(streams) > 0
self._streams = streams
self._output_stream = None
self._thread = None
def Start(self):
"""Returns a pipe to the merged output stream."""
read_pipe, write_pipe = os.pipe()
# Disable buffering for the stream to make sure there is no delay in logs.
self._output_stream = os.fdopen(write_pipe, 'w', 0)
self._thread = threading.Thread(target=self._Run)
self._thread.start();
return os.fdopen(read_pipe, 'r')
def _Run(self):
streams_by_fd = {}
primary_fd = self._streams[0].fileno()
for s in self._streams:
streams_by_fd[s.fileno()] = s
# Set when the primary FD is closed. Input from other FDs will continue to
# be processed until select() runs dry.
flush = False
# The lifetime of the MergedInputStream is bound to the lifetime of
# |primary_fd|.
while primary_fd:
# When not flushing: block until data is read or an exception occurs.
rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
if len(rlist) == 0 and flush:
break
for fileno in xlist:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line + '\n')
else:
del streams_by_fd[fileno]
if fileno == primary_fd:
primary_fd = None
# Flush the streams by executing nonblocking reads from the input file
# descriptors until no more data is available, or all the streams are
# closed.
while streams_by_fd:
rlist, _, _ = select.select(streams_by_fd, [], [], 0)
if not rlist:
break
for fileno in rlist:
line = streams_by_fd[fileno].readline()
if line:
self._output_stream.write(line + '\n')
else:
del streams_by_fd[fileno]
def _GetComponentUri(package_name):
return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
package_name)
class RunPackageArgs:
"""RunPackage() configuration arguments structure.
symbolizer_config: A newline delimited list of source files contained
in the package. Omitting this parameter will disable symbolization.
system_logging: If set, connects a system log reader to the target.
"""
def __init__(self):
self.symbolizer_config = None
self.system_logging = False
@staticmethod
def FromCommonArgs(args):
run_package_args = RunPackageArgs()
run_package_args.system_logging = args.include_system_logs
return run_package_args
def _DrainStreamToStdout(stream, quit_event):
"""Outputs the contents of |stream| until |quit_event| is set."""
while not quit_event.is_set():
rlist, _, _ = select.select([ stream ], [], [], 0.1)
if rlist:
line = rlist[0].readline()
if not line:
return
print(line.rstrip())
def RunPackage(output_dir, target, package_paths, package_name,
package_args, args):
"""Installs the Fuchsia package at |package_path| on the target,
executes it with |package_args|, and symbolizes its output.
output_dir: The path containing the build output files.
target: The deployment Target object that will run the package.
package_paths: The paths to the .far packages to be installed.
package_name: The name of the primary package to run.
package_args: The arguments which will be passed to the Fuchsia process.
args: Structure of arguments to configure how the package will be run.
Returns the exit code of the remote package process."""
system_logger = (
_AttachKernelLogReader(target) if args.system_logging else None)
try:
if system_logger:
# Spin up a thread to asynchronously dump the system log to stdout
# for easier diagnoses of early, pre-execution failures.
log_output_quit_event = multiprocessing.Event()
log_output_thread = threading.Thread(
target=lambda: _DrainStreamToStdout(system_logger.stdout,
log_output_quit_event))
log_output_thread.daemon = True
log_output_thread.start()
target.InstallPackage(package_paths)
if system_logger:
log_output_quit_event.set()
log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
logging.info('Running application.')
command = ['run', _GetComponentUri(package_name)] + package_args
process = target.RunCommandPiped(command,
stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if system_logger:
output_stream = MergedInputStream([process.stdout,
system_logger.stdout]).Start()
else:
output_stream = process.stdout
# Run the log data through the symbolizer process.
build_ids_paths = map(
lambda package_path: os.path.join(
os.path.dirname(package_path), 'ids.txt'),
package_paths)
output_stream = SymbolizerFilter(output_stream, build_ids_paths)
for next_line in output_stream:
print(next_line.rstrip())
process.wait()
if process.returncode == 0:
logging.info('Process exited normally with status code 0.')
else:
# The test runner returns an error status code if *any* tests fail,
# so we should proceed anyway.
logging.warning('Process exited with status code %d.' %
process.returncode)
finally:
if system_logger:
logging.info('Terminating kernel log reader.')
log_output_quit_event.set()
log_output_thread.join()
system_logger.kill()
return process.returncode
| 31.945455 | 78 | 0.672738 |
| 22b237acfb21079ac5f0465760805d7eb002ad18 | 1,741 | py | Python | src/scout_apm/core/samplers/__init__.py | xiamx/scout_apm_python | d03dab45f65cf7d1030e11fabf6da4cf6e72ee59 | ["MIT"] | null | null | null | src/scout_apm/core/samplers/__init__.py | xiamx/scout_apm_python | d03dab45f65cf7d1030e11fabf6da4cf6e72ee59 | ["MIT"] | null | null | null | src/scout_apm/core/samplers/__init__.py | xiamx/scout_apm_python | d03dab45f65cf7d1030e11fabf6da4cf6e72ee59 | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import threading
from datetime import datetime
from os import getpid
from time import sleep
from scout_apm.core.commands import ApplicationEvent
from scout_apm.core.context import AgentContext
from scout_apm.core.samplers.cpu import Cpu
from scout_apm.core.samplers.memory import Memory
logger = logging.getLogger(__name__)
class Samplers(object):
_thread_lock = threading.Semaphore()
@classmethod
def ensure_running(cls):
if cls._thread_lock.acquire(False):
th = threading.Thread(target=Samplers.run_samplers)
th.daemon = True
th.start()
cls._thread_lock.release()
@classmethod
def run_samplers(cls):
logger.debug("Starting Samplers. Acquiring samplers lock.")
try:
if cls._thread_lock.acquire(True):
logger.debug("Acquired samplers lock.")
instances = [Cpu(), Memory()]
while True:
for instance in instances:
event = ApplicationEvent()
event.event_value = instance.run()
event.event_type = (
instance.metric_type() + "/" + instance.metric_name()
)
event.timestamp = datetime.utcnow()
event.source = "Pid: " + str(getpid())
if event.event_value is not None:
AgentContext.socket().send(event)
sleep(60)
finally:
logger.debug("Shutting down samplers thread.")
cls._thread_lock.release()
| 33.480769 | 82 | 0.587593 |
| 69ea5233d9af8fbd6c35252774d182446c5ae15f | 414 | py | Python | pyglet-hg/tests/image/PIL_LA_LOAD.py | sangh/LaserShow | abc95e465e3455dc220cc602dd58358c84666f29 | ["BSD-3-Clause"] | 21 | 2015-11-03T03:15:36.000Z | 2021-03-15T22:00:47.000Z | tests/image/PIL_LA_LOAD.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | ["BSD-3-Clause"] | 3 | 2017-09-14T14:08:28.000Z | 2019-05-20T04:38:15.000Z | tests/image/PIL_LA_LOAD.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | ["BSD-3-Clause"] | 23 | 2017-04-15T19:23:08.000Z | 2020-09-08T11:55:29.000Z |
#!/usr/bin/env python
'''Test LA load using PIL. You should see the la.png image on
a checkerboard background.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
import base_load
from pyglet.image.codecs.pil import *
class TEST_PNG_LA(base_load.TestLoad):
texture_file = 'la.png'
decoder = PILImageDecoder()
if __name__ == '__main__':
unittest.main()
| 19.714286 | 64 | 0.68599 |
| efc667024dfeea28f82970fb499e2eb28a6bc2a6 | 822 | py | Python | venv/Scripts/rst2latex.py | unicloud-uos/uos-sdk-python | d5761d44af87cf79205046708acfb1882a74e133 | ["MIT"] | null | null | null | venv/Scripts/rst2latex.py | unicloud-uos/uos-sdk-python | d5761d44af87cf79205046708acfb1882a74e133 | ["MIT"] | null | null | null | venv/Scripts/rst2latex.py | unicloud-uos/uos-sdk-python | d5761d44af87cf79205046708acfb1882a74e133 | ["MIT"] | null | null | null |
#!E:\workspace\python-sdk\venv\Scripts\python.exe
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| 30.444444 | 76 | 0.675182 |
| ca8e0bdbafd219e0f630f08077577fd5241844ba | 2,024 | py | Python | config/settings/test.py | abought/locuszoom-hosted | 5cb635b18287d15610df0da6c85b477a3eaaaabb | ["MIT"] | null | null | null | config/settings/test.py | abought/locuszoom-hosted | 5cb635b18287d15610df0da6c85b477a3eaaaabb | ["MIT"] | null | null | null | config/settings/test.py | abought/locuszoom-hosted | 5cb635b18287d15610df0da6c85b477a3eaaaabb | ["MIT"] | null | null | null |
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="ph39zwvHUWA8J9iC4KtNed1hYuX6gTciHCUvSUjbFXaA0Cg8pdASyCPLAjUdirQY")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| 36.142857 | 113 | 0.546443 |
| 7b193da5a3ff5b8333d612bb49c4db100ec85adb | 16,450 | py | Python | pyglet/media/synthesis.py | mneyapo/pyglet | 20442eecfa121c74f838cb975562b7a86eb48e35 | ["BSD-3-Clause"] | null | null | null | pyglet/media/synthesis.py | mneyapo/pyglet | 20442eecfa121c74f838cb975562b7a86eb48e35 | ["BSD-3-Clause"] | null | null | null | pyglet/media/synthesis.py | mneyapo/pyglet | 20442eecfa121c74f838cb975562b7a86eb48e35 | ["BSD-3-Clause"] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from .codecs.base import Source, AudioFormat, AudioData
import os
import math
import ctypes
class Envelope:
"""Base class for SynthesisSource amplitude envelopes."""
def get_generator(self, sample_rate, duration):
raise NotImplementedError
class FlatEnvelope(Envelope):
"""A flat envelope, providing basic amplitude setting.
:Parameters:
`amplitude` : float
The amplitude (volume) of the wave, from 0.0 to 1.0.
Values outside of this range will be clamped.
"""
def __init__(self, amplitude=0.5):
self.amplitude = max(min(1.0, amplitude), 0)
def get_generator(self, sample_rate, duration):
amplitude = self.amplitude
while True:
yield amplitude
class LinearDecayEnvelope(Envelope):
"""A linearly decaying envelope.
This envelope linearly decays the amplitude from the peak value
to 0, over the length of the waveform.
:Parameters:
`peak` : float
The Initial peak value of the envelope, from 0.0 to 1.0.
Values outside of this range will be clamped.
"""
def __init__(self, peak=1.0):
self.peak = max(min(1.0, peak), 0)
def get_generator(self, sample_rate, duration):
peak = self.peak
total_bytes = int(sample_rate * duration)
for i in range(total_bytes):
yield (total_bytes - i) / total_bytes * peak
class ADSREnvelope(Envelope):
"""A four part Attack, Decay, Suspend, Release envelope.
This is a four part ADSR envelope. The attack, decay, and release
parameters should be provided in seconds. For example, a value of
0.1 would be 100ms. The sustain_amplitude parameter affects the
sustain volume. This defaults to a value of 0.5, but can be provided
on a scale from 0.0 to 1.0.
:Parameters:
`attack` : float
The attack time, in seconds.
`decay` : float
The decay time, in seconds.
`release` : float
The release time, in seconds.
`sustain_amplitude` : float
The sustain amplitude (volume), from 0.0 to 1.0.
"""
def __init__(self, attack, decay, release, sustain_amplitude=0.5):
self.attack = attack
self.decay = decay
self.release = release
self.sustain_amplitude = max(min(1.0, sustain_amplitude), 0)
def get_generator(self, sample_rate, duration):
sustain_amplitude = self.sustain_amplitude
total_bytes = int(sample_rate * duration)
attack_bytes = int(sample_rate * self.attack)
decay_bytes = int(sample_rate * self.decay)
release_bytes = int(sample_rate * self.release)
sustain_bytes = total_bytes - attack_bytes - decay_bytes - release_bytes
decay_step = (1 - sustain_amplitude) / decay_bytes
release_step = sustain_amplitude / release_bytes
for i in range(1, attack_bytes + 1):
yield i / attack_bytes
for i in range(1, decay_bytes + 1):
yield 1 - (i * decay_step)
for i in range(1, sustain_bytes + 1):
yield sustain_amplitude
for i in range(1, release_bytes + 1):
yield sustain_amplitude - (i * release_step)
class TremoloEnvelope(Envelope):
"""A tremolo envelope, for modulation amplitude.
A tremolo envelope that modulates the amplitude of the
waveform with a sinusoidal pattern. The depth and rate
of modulation can be specified. Depth is calculated as
a percentage of the maximum amplitude. For example:
a depth of 0.2 and amplitude of 0.5 will fluctuate
    the amplitude between 0.4 and 0.5.
:Parameters:
`depth` : float
The amount of fluctuation, from 0.0 to 1.0.
`rate` : float
The fluctuation frequency, in seconds.
`amplitude` : float
The peak amplitude (volume), from 0.0 to 1.0.
"""
def __init__(self, depth, rate, amplitude=0.5):
self.depth = max(min(1.0, depth), 0)
self.rate = rate
self.amplitude = max(min(1.0, amplitude), 0)
def get_generator(self, sample_rate, duration):
total_bytes = int(sample_rate * duration)
period = total_bytes / duration
max_amplitude = self.amplitude
min_amplitude = max(0.0, (1.0 - self.depth) * self.amplitude)
step = (math.pi * 2) / period / self.rate
for i in range(total_bytes):
value = math.sin(step * i)
yield value * (max_amplitude - min_amplitude) + min_amplitude
class SynthesisSource(Source):
"""Base class for synthesized waveforms.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, sample_rate=44800, sample_size=16, envelope=None):
self._duration = float(duration)
self.audio_format = AudioFormat(
channels=1,
sample_size=sample_size,
sample_rate=sample_rate)
self._offset = 0
self._sample_rate = sample_rate
self._sample_size = sample_size
self._bytes_per_sample = sample_size >> 3
self._bytes_per_second = self._bytes_per_sample * sample_rate
self._max_offset = int(self._bytes_per_second * self._duration)
self.envelope = envelope or FlatEnvelope(amplitude=1.0)
self._envelope_generator = self.envelope.get_generator(sample_rate, duration)
if self._bytes_per_sample == 2:
self._max_offset &= 0xfffffffe
def get_audio_data(self, num_bytes, compensation_time=0.0):
"""Return `num_bytes` bytes of audio data."""
num_bytes = min(num_bytes, self._max_offset - self._offset)
if num_bytes <= 0:
return None
timestamp = float(self._offset) / self._bytes_per_second
duration = float(num_bytes) / self._bytes_per_second
data = self._generate_data(num_bytes)
self._offset += num_bytes
return AudioData(data, num_bytes, timestamp, duration, [])
def _generate_data(self, num_bytes):
"""Generate `num_bytes` bytes of data.
Return data as ctypes array or string.
"""
raise NotImplementedError('abstract')
def seek(self, timestamp):
self._offset = int(timestamp * self._bytes_per_second)
# Bound within duration
self._offset = min(max(self._offset, 0), self._max_offset)
# Align to sample
if self._bytes_per_sample == 2:
self._offset &= 0xfffffffe
self._envelope_generator = self.envelope.get_generator(self._sample_rate, self._duration)
class Silence(SynthesisSource):
"""A silent waveform."""
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
return b'\127' * num_bytes
else:
return b'\0' * num_bytes
class WhiteNoise(SynthesisSource):
"""A white noise, random waveform."""
def _generate_data(self, num_bytes):
return os.urandom(num_bytes)
class Sine(SynthesisSource):
"""A sinusoid (sine) waveform.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`frequency` : int
The frequency, in Hz of the waveform you wish to produce.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, frequency=440, **kwargs):
super(Sine, self).__init__(duration, **kwargs)
self.frequency = frequency
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
samples = num_bytes
bias = 127
amplitude = 127
data = (ctypes.c_ubyte * samples)()
else:
samples = num_bytes >> 1
bias = 0
amplitude = 32767
data = (ctypes.c_short * samples)()
step = self.frequency * (math.pi * 2) / self.audio_format.sample_rate
envelope = self._envelope_generator
for i in range(samples):
data[i] = int(math.sin(step * i) * amplitude * next(envelope) + bias)
return data
class Triangle(SynthesisSource):
"""A triangle waveform.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`frequency` : int
The frequency, in Hz of the waveform you wish to produce.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, frequency=440, **kwargs):
super(Triangle, self).__init__(duration, **kwargs)
self.frequency = frequency
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
samples = num_bytes
value = 127
maximum = 255
minimum = 0
data = (ctypes.c_ubyte * samples)()
else:
samples = num_bytes >> 1
value = 0
maximum = 32767
minimum = -32768
data = (ctypes.c_short * samples)()
step = (maximum - minimum) * 2 * self.frequency / self.audio_format.sample_rate
envelope = self._envelope_generator
for i in range(samples):
value += step
if value > maximum:
value = maximum - (value - maximum)
step = -step
if value < minimum:
value = minimum - (value - minimum)
step = -step
data[i] = int(value * next(envelope))
return data
class Sawtooth(SynthesisSource):
"""A sawtooth waveform.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`frequency` : int
The frequency, in Hz of the waveform you wish to produce.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, frequency=440, **kwargs):
super(Sawtooth, self).__init__(duration, **kwargs)
self.frequency = frequency
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
samples = num_bytes
value = 127
maximum = 255
minimum = 0
data = (ctypes.c_ubyte * samples)()
else:
samples = num_bytes >> 1
value = 0
maximum = 32767
minimum = -32768
data = (ctypes.c_short * samples)()
step = (maximum - minimum) * self.frequency / self._sample_rate
envelope = self._envelope_generator
for i in range(samples):
value += step
if value > maximum:
value = minimum + (value % maximum)
data[i] = int(value * next(envelope))
return data
class Square(SynthesisSource):
"""A square (pulse) waveform.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`frequency` : int
The frequency, in Hz of the waveform you wish to produce.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, frequency=440, **kwargs):
super(Square, self).__init__(duration, **kwargs)
self.frequency = frequency
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
samples = num_bytes
bias = 127
amplitude = 127
data = (ctypes.c_ubyte * samples)()
else:
samples = num_bytes >> 1
bias = 0
amplitude = 32767
data = (ctypes.c_short * samples)()
half_period = self.audio_format.sample_rate / self.frequency / 2
envelope = self._envelope_generator
value = 1
count = 0
for i in range(samples):
if count >= half_period:
value = -value
count %= half_period
count += 1
data[i] = int(value * amplitude * next(envelope) + bias)
return data
class FM(SynthesisSource):
"""A simple FM waveform.
This is a simplistic frequency modulated waveform, based on the
concepts by John Chowning. Basic sine waves are used for both
frequency carrier and modulator inputs, of which the frequencies can
be provided. The modulation index, or amplitude, can also be adjusted.
:Parameters:
`duration` : float
The length, in seconds, of audio that you wish to generate.
`carrier` : int
The carrier frequency, in Hz.
`modulator` : int
The modulator frequency, in Hz.
`mod_index` : int
The modulation index.
`sample_rate` : int
Audio samples per second. (CD quality is 44100).
`sample_size` : int
The bit precision. Must be either 8 or 16.
"""
def __init__(self, duration, carrier=440, modulator=440, mod_index=1, **kwargs):
super(FM, self).__init__(duration, **kwargs)
self.carrier = carrier
self.modulator = modulator
self.mod_index = mod_index
def _generate_data(self, num_bytes):
if self._bytes_per_sample == 1:
samples = num_bytes
bias = 127
amplitude = 127
data = (ctypes.c_ubyte * samples)()
else:
samples = num_bytes >> 1
bias = 0
amplitude = 32767
data = (ctypes.c_short * samples)()
car_step = 2 * math.pi * self.carrier
mod_step = 2 * math.pi * self.modulator
mod_index = self.mod_index
sample_rate = self._sample_rate
envelope = self._envelope_generator
sin = math.sin
# FM equation: sin((2 * pi * carrier) + sin(2 * pi * modulator))
for i in range(samples):
increment = i / sample_rate
data[i] = int(sin(car_step * increment + mod_index * sin(mod_step * increment))
* amplitude * next(envelope) + bias)
return data
| 35.074627 | 97 | 0.610699 |
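As an illustrative aside on the `Sine._generate_data` arithmetic in the pyglet file above: a standalone sketch (no pyglet needed) that applies the same step/amplitude/bias formula to produce a few 16-bit samples; the 440 Hz frequency and 44100 Hz sample rate are arbitrary example values.

```python
import math

frequency = 440      # Hz, example value
sample_rate = 44100  # samples per second, example value
amplitude = 32767    # 16-bit signed full scale
bias = 0             # 16-bit samples are centred on zero

step = frequency * (math.pi * 2) / sample_rate
samples = [int(math.sin(step * i) * amplitude + bias) for i in range(8)]
print(samples)  # first eight samples of a 440 Hz sine ramping up from 0
```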
| f7b311c49c9a1decb15829888dbe3ec6570a6847 | 1,308 | py | Python | code/model/metrics.py | chenyangjun45/Mutimode-language-generation | e8fa0379768e2a1cb7dca70eceeac334b605a4e8 | ["MIT"] | 5 | 2020-10-22T01:25:47.000Z | 2020-12-21T10:38:46.000Z | code/model/metrics.py | woyaonidsh/Mutimode | 42cbcddb472f0f162ff546ee1107ee26b5c5e47e | ["MIT"] | 1 | 2021-04-15T02:35:48.000Z | 2021-04-15T13:17:48.000Z | code/model/metrics.py | woyaonidsh/Mutimode | 42cbcddb472f0f162ff546ee1107ee26b5c5e47e | ["MIT"] | 1 | 2021-04-14T12:13:58.000Z | 2021-04-14T12:13:58.000Z |
import jieba
# Use jieba for word segmentation
def Rouge_1(model, reference):  # terms_reference is the reference summary, terms_model is the candidate summary; ***one-gram*** unigram model
    terms_reference = jieba.cut(reference)  # default precise mode
terms_model = jieba.cut(model)
grams_reference = list(terms_reference)
grams_model = list(terms_model)
temp = 0
ngram_all = len(grams_reference)
for x in grams_reference:
if x in grams_model: temp = temp + 1
rouge_1 = temp / ngram_all
return rouge_1
def Rouge_2(model, reference):  # terms_reference is the reference summary, terms_model is the candidate summary; ***Bi-gram*** bigram model
terms_reference = jieba.cut(reference)
terms_model = jieba.cut(model)
grams_reference = list(terms_reference)
grams_model = list(terms_model)
gram_2_model = []
gram_2_reference = []
temp = 0
ngram_all = len(grams_reference) - 1
for x in range(len(grams_model) - 1):
gram_2_model.append(grams_model[x] + grams_model[x + 1])
for x in range(len(grams_reference) - 1):
gram_2_reference.append(grams_reference[x] + grams_reference[x + 1])
for x in gram_2_model:
if x in gram_2_reference: temp = temp + 1
rouge_2 = temp / ngram_all
return rouge_2
def Rouge(model, reference):
return Rouge_1(model, reference), Rouge_2(model, reference)
# Rouge("我的世界是光明的","光明给我的世界以力量")
| 31.902439 | 93 | 0.688838 |
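As an illustrative aside on the counting inside `Rouge_1` above: the same unigram-overlap arithmetic with whitespace tokenisation instead of jieba, so the sketch runs without the jieba dependency; the sentences are invented for the example.

```python
reference = "the cat sat on the mat".split()   # reference summary tokens
candidate = "the cat lay on the mat".split()   # candidate summary tokens

# As in Rouge_1: count reference tokens that appear in the candidate,
# then divide by the number of reference tokens.
overlap = sum(1 for token in reference if token in candidate)
rouge_1 = overlap / len(reference)
print(rouge_1)  # 5 of the 6 reference tokens are found -> 0.8333...
```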
| 41ed1751ffd3b34c6f87ffb56a0afe1528568c44 | 1,383 | py | Python | tests/test_timeseries.py | ndejong/vaping | 995b424990544ea426cbef07baa3896e8cbfb87b | ["Apache-2.0"] | null | null | null | tests/test_timeseries.py | ndejong/vaping | 995b424990544ea426cbef07baa3896e8cbfb87b | ["Apache-2.0"] | null | null | null | tests/test_timeseries.py | ndejong/vaping | 995b424990544ea426cbef07baa3896e8cbfb87b | ["Apache-2.0"] | null | null | null |
import pytest
import time
import vaping.plugins
from vaping import plugin
config = {
"type" : "test_tsdb",
"filename" : "{a}-{b}-{field}",
"field" : "test"
}
@plugin.register("test_tsdb")
class TSDBTestPlugin(vaping.plugins.TimeSeriesDB):
"""
Test plugin from the TimeSeriesDB abstraction
"""
def __init__(self, config, ctx):
super(TSDBTestPlugin, self).__init__(config, ctx)
self.updated = {}
def create(self, filename):
self.created = True
def update(self, filename, time, value):
self.updated[filename] = (time, value)
def test_filename_format():
"""
test filename formatting from data
"""
inst = plugin.get_instance(config, None)
assert inst.format_filename({}, {"a":"first", "b":"second"}) == "first-second-test"
def test_update_and_create():
"""
test that update() and create() are called accordingly during
emit()
"""
inst = plugin.get_instance(config, None)
t = time.time()
inst.emit({
"type" : "test",
"source" : "test_update_and_create",
"ts" : t,
"data" : [
{ "test" : 123, "a" : "row", "b" : "1" },
{ "test" : 456, "a" : "row", "b" : "2" }
]
})
assert inst.created == True
assert inst.updated["row-1-test"] == (t, 123)
assert inst.updated["row-2-test"] == (t, 456)
| 24.263158 | 87 | 0.574837 |
| 130487809f0a7d08d54949133fbdd8d156a43a67 | 1,577 | py | Python | ccfx/scripts/utility.py | PerEr/ccfinderx-core | 749dd1fc114290821dbc35e71fa331a1f7999466 | ["MIT"] | 33 | 2016-03-20T07:35:27.000Z | 2022-01-19T08:48:02.000Z | ccfx/scripts/utility.py | PerEr/ccfinderx-core | 749dd1fc114290821dbc35e71fa331a1f7999466 | ["MIT"] | 5 | 2016-03-14T07:50:15.000Z | 2022-03-31T09:43:24.000Z | ccfx/scripts/utility.py | PerEr/ccfinderx-core | 749dd1fc114290821dbc35e71fa331a1f7999466 | ["MIT"] | 17 | 2015-01-12T15:51:11.000Z | 2022-02-20T01:22:42.000Z |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright: This module has been placed in the public domain.
import sys
class ProgressReporter(object):
def __init__(self, totalAmount):
if totalAmount == 0:
self.silence = True
return
self.silence = False
self.totalAmount = totalAmount
self.currentValue = 0
self.resolution = 80
self.outp = sys.stderr
def proceed(self, currentValue):
if self.silence: return
if currentValue > self.currentValue:
lastTick = self.currentValue * self.resolution // self.totalAmount
curTick = currentValue * self.resolution // self.totalAmount
for i in range(lastTick, curTick):
if i % (self.resolution/5) == 0:
self.outp.write("%d%%" % (100 * i // self.resolution))
if i % 2 == 0:
s = '.'
else:
s = '\b:'
self.outp.write(s)
self.outp.flush()
self.currentValue = currentValue
def done(self):
if self.silence: return
if self.totalAmount == 0:
return
self.proceed(self.totalAmount)
curTick = self.currentValue * self.resolution // self.totalAmount
if curTick % (self.resolution/5) == 0:
self.outp.write("%d%%" % (100 * curTick // self.resolution))
self.outp.write("\n")
self.outp.flush()
def abort(self):
if self.silence: return
if self.totalAmount == 0:
return
self.outp.write("\n")
self.outp.flush()
def escapeCommandline(args):
r = list()
for a in args[:]:
if ' ' in a:
r.append('"%s"' % a)
else:
r.append(a)
return r
| 25.435484 | 70 | 0.609385 |
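As an illustrative aside on the `ProgressReporter` above: a minimal usage sketch, assuming the class from ccfx/scripts/utility.py is importable as `utility`; the 1000-unit workload is made up.

```python
from utility import ProgressReporter  # assumes ccfx/scripts/utility.py is on the import path

reporter = ProgressReporter(1000)      # total amount of work, example value
for i in range(1000):
    # ... one unit of work would happen here ...
    reporter.proceed(i + 1)            # writes percentage ticks to stderr
reporter.done()
```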
| eca17477bcb9123fe0a8b7f9126d8ae943edaa34 | 8,481 | py | Python | facemesh_tracker.py | nralbrecht/facemesh-tracker | 57faf6f3e1fc0d8393d3a9a0739c15b4804f194e | ["MIT"] | null | null | null | facemesh_tracker.py | nralbrecht/facemesh-tracker | 57faf6f3e1fc0d8393d3a9a0739c15b4804f194e | ["MIT"] | null | null | null | facemesh_tracker.py | nralbrecht/facemesh-tracker | 57faf6f3e1fc0d8393d3a9a0739c15b4804f194e | ["MIT"] | null | null | null |
import cv2
import numpy as np
import mediapipe as mp
from scipy.spatial.transform import Rotation
mp_face_mesh = mp.solutions.face_mesh
class FaceMeshTracker:
def __init__(self, server, preview=None):
self.server = server
self.preview = preview
self.capture = cv2.VideoCapture(0)
self.face_mesh = mp_face_mesh.FaceMesh(
max_num_faces=1,
refine_landmarks=True,
min_detection_confidence=0.5,
min_tracking_confidence=0.5)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.capture:
self.capture.release()
if self.face_mesh:
self.face_mesh.close()
def start(self):
while self.capture.isOpened():
success, image = self.capture.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use "break" instead of "continue".
continue
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = self.face_mesh.process(image)
if results.multi_face_landmarks:
selected_face = results.multi_face_landmarks[0]
face_features, orientation, rotation = self.process_face(selected_face)
pivot = self.get_face_pivot(face_features)
self.server.send_update(pivot, rotation)
if self.preview and self.preview.show_image(image, selected_face, face_features, orientation, rotation, pivot):
break
else:
if self.preview and self.preview.show_image(image):
break
def process_face(self, face_landmarks):
face_features = self.extract_features(face_landmarks)
orientation = self.extract_orientation(face_features)
rotation = self.euler_angles_from_orientation(orientation)
return face_features, orientation, rotation
def extract_features(self, face_landmarks):
return {
"forehead": self.average_landmark([
face_landmarks.landmark[67],
face_landmarks.landmark[69],
face_landmarks.landmark[66],
face_landmarks.landmark[109],
face_landmarks.landmark[108],
face_landmarks.landmark[107],
face_landmarks.landmark[10],
face_landmarks.landmark[151],
face_landmarks.landmark[9],
face_landmarks.landmark[338],
face_landmarks.landmark[337],
face_landmarks.landmark[336],
face_landmarks.landmark[297],
face_landmarks.landmark[299],
face_landmarks.landmark[296]
]),
"nose": self.average_landmark([
face_landmarks.landmark[220],
face_landmarks.landmark[237],
face_landmarks.landmark[51],
face_landmarks.landmark[45],
face_landmarks.landmark[44],
face_landmarks.landmark[5],
face_landmarks.landmark[4],
face_landmarks.landmark[1],
face_landmarks.landmark[281],
face_landmarks.landmark[275],
face_landmarks.landmark[274],
face_landmarks.landmark[440],
face_landmarks.landmark[457]
]),
"right_cheek": self.average_landmark([
face_landmarks.landmark[127],
face_landmarks.landmark[234],
face_landmarks.landmark[93],
face_landmarks.landmark[132],
face_landmarks.landmark[34],
face_landmarks.landmark[227],
face_landmarks.landmark[137],
face_landmarks.landmark[177],
face_landmarks.landmark[215],
face_landmarks.landmark[116],
face_landmarks.landmark[123],
face_landmarks.landmark[147],
face_landmarks.landmark[213],
face_landmarks.landmark[50],
face_landmarks.landmark[18],
]),
"left_cheek": self.average_landmark([
face_landmarks.landmark[280],
face_landmarks.landmark[411],
face_landmarks.landmark[345],
face_landmarks.landmark[352],
face_landmarks.landmark[376],
face_landmarks.landmark[433],
face_landmarks.landmark[264],
face_landmarks.landmark[447],
face_landmarks.landmark[366],
face_landmarks.landmark[401],
face_landmarks.landmark[435],
face_landmarks.landmark[356],
face_landmarks.landmark[454],
face_landmarks.landmark[323],
face_landmarks.landmark[361]
]),
"chin": self.average_landmark([
face_landmarks.landmark[194],
face_landmarks.landmark[32],
face_landmarks.landmark[140],
face_landmarks.landmark[176],
face_landmarks.landmark[201],
face_landmarks.landmark[208],
face_landmarks.landmark[171],
face_landmarks.landmark[148],
face_landmarks.landmark[200],
face_landmarks.landmark[199],
face_landmarks.landmark[175],
face_landmarks.landmark[152],
face_landmarks.landmark[421],
face_landmarks.landmark[428],
face_landmarks.landmark[396],
face_landmarks.landmark[377],
face_landmarks.landmark[418],
face_landmarks.landmark[262],
face_landmarks.landmark[369],
face_landmarks.landmark[400],
])
}
def extract_orientation(self, features):
vertical_plane = self.plane_from_points(features["forehead"], features["nose"], features["chin"])
horizontal_plane = self.plane_from_points(features["left_cheek"], features["nose"], features["right_cheek"])
forward = self.get_plane_plane_intersection(vertical_plane, horizontal_plane)[0]
up = self.normalize(np.cross((features["right_cheek"] - features["nose"]), (features["left_cheek"] - features["nose"])))
right = self.normalize(np.cross((features["chin"] - features["nose"]), (features["forehead"] - features["nose"])))
return (forward, up, right)
def euler_angles_from_orientation(self, orientation):
forward, up, right = orientation
return Rotation.from_matrix(np.array([
right,
up,
forward
])).as_euler("yxz", degrees=True)
def average_landmark(self, landmarks):
average_point = np.zeros(3)
for point in landmarks:
average_point[0] += point.x
average_point[1] += point.y
average_point[2] += point.z
return average_point / len(landmarks)
def plane_from_points(self, p1, p2, p3):
# https://kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/
# These two vectors are in the plane
v1 = p3 - p1
v2 = p2 - p1
# the cross product is a vector normal to the plane
cp = np.cross(v1, v2)
a, b, c = cp
# This evaluates a * x3 + b * y3 + c * z3 which equals d
d = np.dot(cp, p3)
# print("The equation is {0}x + {1}y + {2}z + {3} = 0".format(a, b, c, -d))
return np.array([a, b, c, -d])
def get_plane_plane_intersection(self, A, B):
# https://gist.github.com/marmakoide/79f361dd613f2076ece544070ddae6ab
U = self.normalize(np.cross(A[:-1], B[:-1]))
Ao = np.array((A[:-1], B[:-1], U))
Bo = np.array((-A[-1], -B[-1], 0.))
return U, np.linalg.solve(Ao, Bo)
def normalize(self, vector):
return vector / np.linalg.norm(vector)
def get_face_pivot(self, features):
return np.array([
(features["nose"][0] - 0.5) * -50,
(features["nose"][1] - 0.5) * -50,
(features["nose"][2] + 0.035) * 500
])
| 36.089362 | 128 | 0.569626 |
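As an illustrative aside on `plane_from_points` in the tracker above: it encodes the textbook plane-through-three-points construction (normal from a cross product, offset from a dot product). A small self-contained check with made-up points:

```python
import numpy as np

p1 = np.array([1.0, 0.0, 0.0])
p2 = np.array([0.0, 1.0, 0.0])
p3 = np.array([0.0, 0.0, 1.0])

v1, v2 = p3 - p1, p2 - p1
normal = np.cross(v1, v2)          # (a, b, c) of the plane a*x + b*y + c*z = d
d = np.dot(normal, p3)
plane = np.array([*normal, -d])    # same [a, b, c, -d] layout as plane_from_points

# Every one of the three points satisfies the plane equation.
for p in (p1, p2, p3):
    assert abs(np.dot(normal, p) - d) < 1e-9
print(plane)
```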
| 5728a37fc33467968ca68de316d963f31f66da03 | 7,841 | py | Python | python/paddle/dataset/flowers.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | ["Apache-2.0"] | 1 | 2018-09-06T06:12:20.000Z | 2018-09-06T06:12:20.000Z | python/paddle/dataset/flowers.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | ["Apache-2.0"] | null | null | null | python/paddle/dataset/flowers.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | ["Apache-2.0"] | 1 | 2022-03-26T11:44:12.000Z | 2022-03-26T11:44:12.000Z |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module will download dataset from
http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html
and parse train/test set into paddle reader creators.
This set contains images of flowers belonging to 102 different categories.
The images were acquired by searching the web and taking pictures. There are a
minimum of 40 images for each category.
The database was used in:
Nilsback, M-E. and Zisserman, A. Automated flower classification over a large
number of classes.Proceedings of the Indian Conference on Computer Vision,
Graphics and Image Processing (2008)
http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}.
"""
from __future__ import print_function
import itertools
import functools
from .common import download
import tarfile
import scipy.io as scio
from paddle.dataset.image import *
from paddle.reader import *
from paddle import compat as cpt
import os
import numpy as np
from multiprocessing import cpu_count
import six
from six.moves import cPickle as pickle
__all__ = ['train', 'test', 'valid']
DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz'
LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat'
SETID_URL = 'http://paddlemodels.bj.bcebos.com/flowers/setid.mat'
DATA_MD5 = '52808999861908f626f3c1f4e79d11fa'
LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'
SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'
# In official 'readme', tstid is the flag of test data
# and trnid is the flag of train data. But test data is more than train data.
# So we exchange the train data and test data.
TRAIN_FLAG = 'tstid'
TEST_FLAG = 'trnid'
VALID_FLAG = 'valid'
def default_mapper(is_train, sample):
'''
map image bytes data to type needed by model input layer
'''
img, label = sample
img = load_image_bytes(img)
img = simple_transform(
img, 256, 224, is_train, mean=[103.94, 116.78, 123.68])
return img.flatten().astype('float32'), label
train_mapper = functools.partial(default_mapper, True)
test_mapper = functools.partial(default_mapper, False)
def reader_creator(data_file,
label_file,
setid_file,
dataset_name,
mapper,
buffered_size=1024,
use_xmap=True,
cycle=False):
'''
1. read images from tar file and
merge images into batch files in 102flowers.tgz_batch/
2. get a reader to read sample from batch file
:param data_file: downloaded data file
:type data_file: string
:param label_file: downloaded label file
:type label_file: string
:param setid_file: downloaded setid file containing information
about how to split dataset
:type setid_file: string
:param dataset_name: data set name (tstid|trnid|valid)
:type dataset_name: string
:param mapper: a function to map image bytes data to type
needed by model input layer
:type mapper: callable
:param buffered_size: the size of buffer used to process images
:type buffered_size: int
:param cycle: whether to cycle through the dataset
:type cycle: bool
:return: data reader
:rtype: callable
'''
labels = scio.loadmat(label_file)['labels'][0]
indexes = scio.loadmat(setid_file)[dataset_name][0]
img2label = {}
for i in indexes:
img = "jpg/image_%05d.jpg" % i
img2label[img] = labels[i - 1]
file_list = batch_images_from_tar(data_file, dataset_name, img2label)
def reader():
while True:
for file in open(file_list):
file = file.strip()
batch = None
with open(file, 'rb') as f:
if six.PY2:
batch = pickle.load(f)
else:
batch = pickle.load(f, encoding='bytes')
if six.PY3:
batch = cpt.to_text(batch)
data = batch['data']
labels = batch['label']
for sample, label in six.moves.zip(data, batch['label']):
yield sample, int(label) - 1
if not cycle:
break
if use_xmap:
cpu_num = int(os.environ.get('CPU_NUM', cpu_count()))
return xmap_readers(mapper, reader, cpu_num, buffered_size)
else:
return map_readers(mapper, reader)
def train(mapper=train_mapper, buffered_size=1024, use_xmap=True, cycle=False):
'''
Create flowers training set reader.
It returns a reader, each sample in the reader is
image pixels in [0, 1] and label in [1, 102]
translated from original color image by steps:
1. resize to 256*256
2. random crop to 224*224
3. flatten
:param mapper: a function to map sample.
:type mapper: callable
:param buffered_size: the size of buffer used to process images
:type buffered_size: int
:param cycle: whether to cycle through the dataset
:type cycle: bool
:return: train data reader
:rtype: callable
'''
return reader_creator(
download(DATA_URL, 'flowers', DATA_MD5),
download(LABEL_URL, 'flowers', LABEL_MD5),
download(SETID_URL, 'flowers', SETID_MD5),
TRAIN_FLAG,
mapper,
buffered_size,
use_xmap,
cycle=cycle)
def test(mapper=test_mapper, buffered_size=1024, use_xmap=True, cycle=False):
'''
Create flowers test set reader.
It returns a reader, each sample in the reader is
image pixels in [0, 1] and label in [1, 102]
translated from original color image by steps:
1. resize to 256*256
2. random crop to 224*224
3. flatten
:param mapper: a function to map sample.
:type mapper: callable
:param buffered_size: the size of buffer used to process images
:type buffered_size: int
:param cycle: whether to cycle through the dataset
:type cycle: bool
:return: test data reader
:rtype: callable
'''
return reader_creator(
download(DATA_URL, 'flowers', DATA_MD5),
download(LABEL_URL, 'flowers', LABEL_MD5),
download(SETID_URL, 'flowers', SETID_MD5),
TEST_FLAG,
mapper,
buffered_size,
use_xmap,
cycle=cycle)
def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True):
'''
Create flowers validation set reader.
It returns a reader, each sample in the reader is
image pixels in [0, 1] and label in [1, 102]
translated from original color image by steps:
1. resize to 256*256
2. random crop to 224*224
3. flatten
:param mapper: a function to map sample.
:type mapper: callable
:param buffered_size: the size of buffer used to process images
:type buffered_size: int
:return: test data reader
:rtype: callable
'''
return reader_creator(
download(DATA_URL, 'flowers', DATA_MD5),
download(LABEL_URL, 'flowers', LABEL_MD5),
download(SETID_URL, 'flowers', SETID_MD5), VALID_FLAG, mapper,
buffered_size, use_xmap)
def fetch():
download(DATA_URL, 'flowers', DATA_MD5)
download(LABEL_URL, 'flowers', LABEL_MD5)
download(SETID_URL, 'flowers', SETID_MD5)
| 34.240175 | 79 | 0.668282 |
| 76cdd5b467696ffdd8e68a6faec27c1082360c77 | 216 | py | Python | Curso_em_Video_Exercicios/ex003.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null | Curso_em_Video_Exercicios/ex003.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null | Curso_em_Video_Exercicios/ex003.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null |
#Sum two numbers provided by the user
#Input
a = float(input('Me diga um número: '))
b = float(input('Me diga outro número: '))
#Processing
c = a + b
#Output
print('A soma de {} e {} é {}'.format(a,b,c))
| 18 | 45 | 0.648148 |
| 40bd20f3104dbe53f49ee56f6338741699b5c969 | 4,024 | py | Python | src/rest_api.py | dprslt/aurora | 3b70c036f01d9b69ae49559c404da79594530041 | ["Apache-2.0"] | null | null | null | src/rest_api.py | dprslt/aurora | 3b70c036f01d9b69ae49559c404da79594530041 | ["Apache-2.0"] | 17 | 2021-05-08T06:52:03.000Z | 2021-10-30T16:49:48.000Z | src/rest_api.py | dprslt/aurora | 3b70c036f01d9b69ae49559c404da79594530041 | ["Apache-2.0"] | null | null | null |
import logging
import config
from flask import Flask, request, abort, Response
from multiprocessing import Process
from strategies.colors.FixedColor import FixedColor
from strategies.light.Breath import Breath
from strategies.light.SimpleColor import SimpleColor
from strategies.light.TimedWrapper import TimedWrapper
from strategies.light.TurnedOff import TurnedOff
from werkzeug.serving import make_server
import threading
from strategies.screen.DisplayScrollingMessage import DisplayScrollingMessage
from strategies.screen.QuickTime import QuickTime
from strategies.screen.RealClockTime import RealClockTime
def get_color_from_query(args):
r = int(request.args.get('r', None))
g = int(request.args.get('g', None))
b = int(request.args.get('b', None))
return FixedColor([r, g, b])
class ServerThread(threading.Thread):
def __init__(self, app):
threading.Thread.__init__(self)
self.server = make_server('0.0.0.0', 5000, app)
self.ctx = app.app_context()
self.ctx.push()
def run(self):
logging.info('Starting web server')
self.server.serve_forever()
def shutdown(self):
logging.info('Stopping web server')
self.server.shutdown()
def start_rest_server(light, disp):
app = Flask(__name__)
@app.route("/ping")
def ping():
return 'pong'
@app.route("/light/<mode>", methods=['POST'])
def switch_light_mode(mode):
if(mode == 'on'):
config.scheduler.set_light_thread(
SimpleColor(light, FixedColor([255, 172, 68])))
if(mode == 'off'):
config.scheduler.set_light_thread(TurnedOff(light))
return 'OK'
@app.route("/light/color", methods=['POST'])
def display_light_color():
color_strategy = None
try:
color_strategy = get_color_from_query(request.args)
except:
            abort(Response('r, g and b query params are mandatory and must be valid numbers, given: r:' +
                           str(request.args.get('r')) + ', g:' + str(request.args.get('g')) +
                           ', b:' + str(request.args.get('b')), 400))
color_thread = SimpleColor(light, color_strategy)
requestedTime = int(request.args.get('time', -1))
if(requestedTime > 0):
wrapped_thread = TimedWrapper(color_thread, requestedTime)
config.scheduler.temporary_set_light_thread(wrapped_thread)
else:
config.scheduler.set_light_thread(color_thread)
return 'OK'
@app.route("/light/breath", methods=['POST'])
def display_light_breath():
config.scheduler.set_light_thread(Breath(
light,
hue=0.08,
sat=0.9,
value_target=0.99,
value_from=0.1,
duration=[2.5, 2.5],
pauses=[0.05, 0.8],
frequency=40
))
return 'OK'
@app.route("/display/text/<text>", methods=['POST'])
def display_custom_text(text):
color = None
duration = len(text) / 3 + 1
try:
color = get_color_from_query(request.args)
finally:
if(color):
config.scheduler.temporary_switch_screen_thread(
DisplayScrollingMessage(
disp, text, duration, screen_color_strategy=color)
)
else:
config.scheduler.temporary_switch_screen_thread(
DisplayScrollingMessage(
disp, text, duration)
)
return 'OK'
@app.route("/display/date", methods=['POST'])
def display_date():
config.scheduler.set_screen_thread(RealClockTime(disp))
return 'OK'
@app.route("/display/date/quick", methods=['POST'])
def display_quick_date():
config.scheduler.set_screen_thread(QuickTime(disp))
return 'OK'
server = ServerThread(app)
server.start()
return server
# server_thread = Process(target=app.run, kwargs=dict(host='0.0.0.0'))
# server_thread.start()
# return server_thread
| 31.4375 | 105 | 0.621769 |
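As an illustrative aside on `get_color_from_query` in the file above: a small sketch of the same r/g/b query parsing under Flask's test request context; the URL and colour values are made up for the example.

```python
from flask import Flask, request

app = Flask(__name__)

# Simulate a request such as POST /light/color?r=255&g=172&b=68
with app.test_request_context("/light/color?r=255&g=172&b=68"):
    r = int(request.args.get("r", None))
    g = int(request.args.get("g", None))
    b = int(request.args.get("b", None))
    print([r, g, b])  # [255, 172, 68]
```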
| 44dff2c0782ca83e6e49723abc5c295d39aacf94 | 664 | py | Python | backend/task_profile/api/v1/serializers.py | crowdbotics-apps/privet-albom-28579 | 31de761497946b3932b11bae5d6b6216120f6634 | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/task_profile/api/v1/serializers.py | crowdbotics-apps/privet-albom-28579 | 31de761497946b3932b11bae5d6b6216120f6634 | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/task_profile/api/v1/serializers.py | crowdbotics-apps/privet-albom-28579 | 31de761497946b3932b11bae5d6b6216120f6634 | ["FTL", "AML", "RSA-MD"] | null | null | null |
from rest_framework import serializers
from task_profile.models import TaskerProfile, CustomerProfile, Notification, InviteCode
class NotificationSerializer(serializers.ModelSerializer):
class Meta:
model = Notification
fields = "__all__"
class InviteCodeSerializer(serializers.ModelSerializer):
class Meta:
model = InviteCode
fields = "__all__"
class CustomerProfileSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerProfile
fields = "__all__"
class TaskerProfileSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerProfile
fields = "__all__"
| 24.592593 | 88 | 0.730422 |
| 006cd144093c73acdd634da996be33ee5e33e518 | 4,690 | py | Python | Glove/3/train.py | amurto/nlp-scripts | f1158221a87838589ccff4eaa4db5349e6ec1c8d | ["MIT"] | 1 | 2020-06-15T05:46:42.000Z | 2020-06-15T05:46:42.000Z | Glove/3/train.py | amurto/nlp-scripts | f1158221a87838589ccff4eaa4db5349e6ec1c8d | ["MIT"] | null | null | null | Glove/3/train.py | amurto/nlp-scripts | f1158221a87838589ccff4eaa4db5349e6ec1c8d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""insult_train.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11OTc2Q2mXQ1a3O0vljL4FhhZhqJqQzT_
"""
from google.colab import drive
drive.mount('/content/drive')
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM
from keras.layers import GlobalMaxPooling1D
from keras.models import Model
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.layers import Input
from keras.layers.merge import Concatenate
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
toxic_comments = pd.read_csv("/content/drive/My Drive/work/Keyword_Suggestion/Training/Datasets/hatekeyword.csv")
toxic_comments.head()
filter = toxic_comments["comment_text"] != ""
toxic_comments = toxic_comments[filter]
toxic_comments = toxic_comments.dropna()
toxic_comments_labels = toxic_comments[["toxic", "severe_toxic", "obscene","insult"]]
toxic_comments_labels.head()
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
toxic_comments_labels.sum(axis=0).plot.bar()
def preprocess_text(sen):
# Remove punctuations and numbers
sentence = re.sub('[^a-zA-Z]', ' ', sen)
# Single character removal
sentence = re.sub(r"\s+[a-zA-Z]\s+", ' ', sentence)
# Removing multiple spaces
sentence = re.sub(r'\s+', ' ', sentence)
return sentence
X = []
sentences = list(toxic_comments["comment_text"])
for sen in sentences:
X.append(preprocess_text(sen))
y = toxic_comments_labels.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 200
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
from numpy import array
from numpy import asarray
from numpy import zeros
embeddings_dictionary = dict()
glove_file = open('/content/drive/My Drive/work/Emotion Detection/glove.6B.100d.txt', encoding="utf8")
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
embedding_matrix = zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
deep_inputs = Input(shape=(maxlen,))
embedding_layer = Embedding(vocab_size, 100, weights=[embedding_matrix], trainable=False)(deep_inputs)
LSTM_Layer_1 = LSTM(128)(embedding_layer)
dense_layer_1 = Dense(4, activation='sigmoid')(LSTM_Layer_1)
model = Model(inputs=deep_inputs, outputs=dense_layer_1)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print(model.summary())
from keras.utils import plot_model
plot_model(model, to_file='model_plot4a.png', show_shapes=True, show_layer_names=True)
history = model.fit(X_train, y_train, batch_size=128, epochs=5, verbose=1, validation_split=0.2)
score = model.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
predictions = model.predict(X_test, verbose=1)
print(X_test)
from keras.models import load_model
model.save('insult_model.h5')
import pickle
# saving
with open('insult_tokenizer.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
# loading
with open('insult_tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
model1 = load_model('insult_model.h5')
score = model1.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
| 27.916667
| 113
| 0.761407
|
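A minimal sketch of the cleaning step in the training script above, assuming `preprocess_text` is in scope; the sample comment is invented for illustration.
# Illustrative only: the sample string is not taken from the dataset.
sample = "He's a 2-faced LIAR!!"
cleaned = preprocess_text(sample)
# Non-letters become spaces, single letters left stranded between spaces are
# removed, and runs of whitespace collapse to one space.
print(cleaned)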
46c622d556a1ea49b49f91759e9d2e80d1d7d876
| 487
|
py
|
Python
|
plans/test_helpmecarry_pypnpnaoqi.py
|
LCAS/spqrel_tools
|
9dc27364811393082b86a9be5dcc654239317515
|
[
"MIT"
] | 9
|
2017-07-29T06:17:19.000Z
|
2020-12-07T08:22:57.000Z
|
plans/test_helpmecarry_pypnpnaoqi.py
|
LCAS/spqrel_tools
|
9dc27364811393082b86a9be5dcc654239317515
|
[
"MIT"
] | 48
|
2017-06-01T09:29:18.000Z
|
2020-09-16T19:50:12.000Z
|
plans/test_helpmecarry_pypnpnaoqi.py
|
LCAS/spqrel_tools
|
9dc27364811393082b86a9be5dcc654239317515
|
[
"MIT"
] | 11
|
2017-06-01T08:13:02.000Z
|
2020-11-11T20:25:36.000Z
|
import os
import sys
try:
from pnp_cmd_naoqi import *
except:
print "Please set PNP_HOME environment variable to PetriNetPlans folder."
sys.exit(1)
p = PNPCmd()
p.begin()
c = 0
#test arsenable AND stopfolloging
p.exec_action('asrenable','')
# follow the recorded person to the car
p.exec_action('updatefollowpersoncoord', 'stopfollowing', interrupt='personlost')
p.exec_action('asrenable','off')
#next thing to test
#p.exec_action('dialogue','lookforhelp')
p.end()
| 15.21875
| 81
| 0.731006
|
4d28c7c32344f59859f115e76a37320fbbb6359e
| 5,679
|
py
|
Python
|
cogbot/cogs/abc/base_cog.py
|
Arcensoth/cogbot
|
ef9d935ae8c8fbe00fb9370c75e0e6d9189141d0
|
[
"MIT"
] | 8
|
2016-12-26T14:10:38.000Z
|
2021-01-02T03:50:05.000Z
|
cogbot/cogs/abc/base_cog.py
|
Arcensoth/cogbot
|
ef9d935ae8c8fbe00fb9370c75e0e6d9189141d0
|
[
"MIT"
] | 28
|
2016-12-12T04:06:53.000Z
|
2020-04-23T06:18:55.000Z
|
cogbot/cogs/abc/base_cog.py
|
Arcensoth/cogbot
|
ef9d935ae8c8fbe00fb9370c75e0e6d9189141d0
|
[
"MIT"
] | 9
|
2017-06-03T00:33:57.000Z
|
2020-10-29T18:16:02.000Z
|
import logging
from abc import ABC, abstractmethod
from typing import Dict, Generic, Type, TypeVar, Union
from discord import Member, Message, Reaction, Server
from cogbot.cog_bot import CogBot
from cogbot.cogs.abc.base_cog_server_state import BaseCogServerState
from cogbot.types import ServerId
ResolvedOptions = Dict
RawOptions = Union[ResolvedOptions, str]
S = TypeVar("S", bound=BaseCogServerState)
class BaseCog(ABC, Generic[S]):
def __init__(self, ext: str, bot: CogBot):
self.ext: str = ext
self.bot: CogBot = bot
self.options = self.bot.state.get_extension_state(ext)
self.server_state_by_id: Dict[ServerId, S] = {}
self.log = logging.getLogger(self.ext)
def get_server_state(self, server: Server) -> S:
return self.server_state_by_id.get(server.id)
def set_server_state(self, server: Server, state: S):
if server.id in self.server_state_by_id:
raise KeyError(f"State for server {server} already exists")
self.server_state_by_id[server.id] = state
async def create_server_state(self, server: Server, options: ResolvedOptions) -> S:
server_id = server.id
state = self.server_state_class(self.ext, self.bot, server_id, options)
await state.base_setup()
return state
async def resolve_options(
self, server: Server, options: RawOptions
) -> ResolvedOptions:
# if options is a string, use external options
if isinstance(options, str):
options_address = options
self.log.info(
f"Loading state data for server {server} extension {self.ext} from: {options_address}"
)
resolved_options = await self.bot.load_json(options_address)
self.log.info(
f"Successfully loaded state data for server {server} extension {self.ext}"
)
# otherwise, we must be using inline options
elif isinstance(options, dict):
resolved_options = options
else:
raise ValueError(f"Invalid server options: not a dict or str")
return resolved_options
async def setup_state(self, server: Server, options: RawOptions):
try:
resolved_options = await self.resolve_options(server, options)
state = await self.create_server_state(server, resolved_options)
self.set_server_state(server, state)
except:
self.log.exception(
f"Failed to setup state for server {server} extension {self.ext}"
)
async def init_states(self):
raw_servers = self.options.get("servers", {})
for server_key, options in raw_servers.items():
server_key: str
options: RawOptions
server = self.bot.get_server_from_key(server_key)
if not server:
self.log.error(f"Skipping unknown server: {server_key}")
continue
await self.setup_state(server, options)
async def clear_states(self):
for state in self.server_state_by_id.values():
state: S
await state.base_teardown()
self.server_state_by_id.clear()
async def reload(self):
await self.clear_states()
await self.init_states()
async def on_ready(self):
# reload server states
await self.reload()
async def on_reaction_add(self, reaction: Reaction, reactor: Member):
# make sure this isn't a DM
if isinstance(reactor, Member):
state = self.get_server_state(reaction.message.server)
# ignore bot's reactions
if state and reactor != self.bot.user:
await state.on_reaction(reaction, reactor)
async def on_message(self, message: Message):
# make sure this isn't a DM
if message.server:
state = self.get_server_state(message.server)
# ignore bot's messages
if state and message.author != self.bot.user:
await state.on_message(message)
async def on_message_delete(self, message: Message):
# make sure this isn't a DM
if message.server:
state = self.get_server_state(message.server)
# ignore bot's messages
if state and message.author != self.bot.user:
await state.on_message_delete(message)
async def on_message_edit(self, before: Message, after: Message):
# make sure this isn't a DM
if after.server:
state = self.get_server_state(after.server)
# ignore bot's messages
if state and after.author != self.bot.user:
await state.on_message_edit(before, after)
async def on_member_join(self, member: Member):
state = self.get_server_state(member.server)
if state:
await state.on_member_join(member)
async def on_member_remove(self, member: Member):
state = self.get_server_state(member.server)
if state:
await state.on_member_remove(member)
async def on_member_ban(self, member: Member):
state = self.get_server_state(member.server)
if state:
await state.on_member_ban(member)
async def on_member_unban(self, server: Server, member: Member):
state = self.get_server_state(server)
if state:
await state.on_member_unban(server, member)
@property
@abstractmethod
def server_state_class(self) -> Type[S]:
""" Return the class itself responsible for creating the server state object. """
# TODO Can this be determined automatically? #enhance
| 37.361842
| 102
| 0.640782
|
225da5bf3869ee208b3ad3cc45163eec501688cc
| 3,762
|
py
|
Python
|
uno_product_checker.py
|
gebn/uno-product-checker
|
63a467f01d06365ea63595163697a61f3f269cfb
|
[
"MIT"
] | null | null | null |
uno_product_checker.py
|
gebn/uno-product-checker
|
63a467f01d06365ea63595163697a61f3f269cfb
|
[
"MIT"
] | 7
|
2017-08-30T09:47:59.000Z
|
2018-02-12T20:30:34.000Z
|
uno_product_checker.py
|
gebn/uno-product-checker
|
63a467f01d06365ea63595163697a61f3f269cfb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Dict
import logging
import os
import json
import requests
import boto3
_VERSION = '1.2.0'
_ENDPOINT = 'https://my.uno.net.uk/modules/addons/unobroadband' \
'/broadbandavailability.php'
_PHONE_NUMBER = os.environ['PHONE_NUMBER']
_PRODUCT_TYPE = os.environ['PRODUCT_TYPE']
_EXPECTED_PRODUCTS = {int(pid)
for pid in os.environ['EXPECTED_PRODUCTS'].split(',')}
_NOTIFICATION_TOPIC_ARN = os.environ['NOTIFICATION_TOPIC_ARN']
_NOTIFICATION_TOPIC_REGION = _NOTIFICATION_TOPIC_ARN.split(':')[3]
_PUSHOVER_APP_TOKEN = os.environ['PUSHOVER_APP_TOKEN']
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def find_available_products(type_: str, phone_number: str) -> Dict[int, str]:
"""
Ask Uno for a list of packages available on a line.
:param type_: The type of product to query for, e.g. Phone & Broadband is
'phone_broadband'.
:param phone_number: The line phone number, with no spaces or country code.
:return: A dictionary of product names, keyed by product ID.
:raises requests.exceptions.RequestException: If the request failed, or Uno
indicated a client-side
error.
"""
logger.debug(f'Querying for {type_} products for {phone_number}...')
response = requests.post(
_ENDPOINT,
headers={
'User-Agent': f'uno-product-checker/{_VERSION}'
},
data={
'phone_number': phone_number,
'type': type_
})
response.raise_for_status()
logger.info(f'Request time: {response.elapsed.total_seconds()}s')
json_ = response.json()
products = {product['id']: product['name']
for _, content in json_.items()
for _, product in content['products'].items()}
return products
def main() -> int:
"""
Executes the high-level logic of the checker.
:return: 0 on success, 1 on failure.
"""
logger.debug(f'Expected products: {_EXPECTED_PRODUCTS}')
try:
available_products = find_available_products(_PRODUCT_TYPE,
_PHONE_NUMBER)
logger.debug(f'Available products: {available_products}')
if available_products.keys() != _EXPECTED_PRODUCTS:
logger.info('Available product list has changed')
message = {
'app': _PUSHOVER_APP_TOKEN,
'title': 'New services available!',
'body': '\n'.join(f' - {name}'
for _, name in available_products.items())
}
sns_client = boto3.client('sns',
region_name=_NOTIFICATION_TOPIC_REGION)
response = sns_client.publish(
TopicArn=_NOTIFICATION_TOPIC_ARN,
Message=json.dumps(message, ensure_ascii=False))
logger.info(f"Published message {response['MessageId']} to " +
_NOTIFICATION_TOPIC_ARN)
else:
logger.info('No change to available product list')
return 0
except requests.exceptions.RequestException:
logger.exception(f'Failed to retrieve products for {_PHONE_NUMBER}')
return 1
# noinspection PyUnusedLocal
def lambda_handler(event, context) -> int:
"""
AWS Lambda entry point.
:param event: The event that triggered this execution.
:param context: Current runtime information: http://docs.aws.amazon.com
/lambda/latest/dg/python-context-object.html.
:return: The script exit code.
"""
logger.info(f'Event: {event}')
return main()
| 35.490566
| 79
| 0.608985
|
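A small sketch of the comparison that drives the notification above: a dict's `keys()` view compares equal to a plain set, so a changed product line-up is detected with a single inequality. The values here are invented.
# Invented example data, not real Uno products.
available = {101: 'ADSL Unlimited', 102: 'FTTC 40/10'}
expected_ids = {101, 102, 103}
print(available.keys() != expected_ids)  # True: product 103 is no longer offered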
ff4a4690755ffc659a620cf08a60726570766de9
| 819
|
py
|
Python
|
tests/core/test_valid.py
|
hohe/siliconcompiler
|
497f272c87c8f247dcd29db76c8d6ed0c0939e50
|
[
"Apache-2.0"
] | 1
|
2022-03-10T03:56:49.000Z
|
2022-03-10T03:56:49.000Z
|
tests/core/test_valid.py
|
hohe/siliconcompiler
|
497f272c87c8f247dcd29db76c8d6ed0c0939e50
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_valid.py
|
hohe/siliconcompiler
|
497f272c87c8f247dcd29db76c8d6ed0c0939e50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import siliconcompiler
import re
def test_valid():
chip = siliconcompiler.Chip()
chip.load_target("freepdk45_demo")
#basic
valid = chip.valid('design')
assert valid
#nest
valid = chip.valid('asic','minlayer')
assert valid
#dynamic valid
valid = chip.valid('pdk','grid', '10M', 'metal1', 'name')
assert valid
#valid b/c of default (valid for set)
valid = chip.valid('pdk','grid', 'M10', 'metal1', 'name', default_valid=True)
assert valid
#dynamic with default fields
valid = chip.valid('pdk','grid', 'default', 'default', 'name')
assert valid
#not working
valid = chip.valid('blah')
assert not valid
#########################
if __name__ == "__main__":
test_valid()
| 25.59375
| 81
| 0.626374
|
74a523d16e2cee1cab37c04eafdbc4c2bd2aef9d
| 1,223
|
py
|
Python
|
backend/app/core/config.py
|
furyhawk/textsum
|
5bea375fe3888095911abe3c4dda018e52c5c535
|
[
"MIT"
] | 2
|
2021-10-01T06:28:43.000Z
|
2021-11-19T05:40:57.000Z
|
backend/app/core/config.py
|
furyhawk/text_summarization
|
8d63574ec93a4535f2fed7915c034461c1903475
|
[
"MIT"
] | null | null | null |
backend/app/core/config.py
|
furyhawk/text_summarization
|
8d63574ec93a4535f2fed7915c034461c1903475
|
[
"MIT"
] | null | null | null |
from pydantic import BaseSettings, BaseModel
from typing import List
from functools import lru_cache
class Settings(BaseSettings):
PROJECT_NAME: str = "textsum"
MODELS: List[str] = ['Headline', 'Transformer',
'TFIDF', 'T5', 'Finetuned']
MIN_LENGTH: int = 15
MAX_LENGTH: int = 150
HEADLINE_MIN_LENGTH = 7
HEADLINE_MAX_LENGTH = 20
class Config:
env_prefix = ""
class LogConfig(BaseModel):
"""Logging configuration to be set for the server"""
LOGGER_NAME: str = "app"
LOG_FORMAT: str = "%(levelprefix)s | %(asctime)s | %(message)s"
LOG_LEVEL: str = "INFO"
# Logging config
version = 1
disable_existing_loggers = False
formatters = {
"default": {
"()": "uvicorn.logging.DefaultFormatter",
"fmt": LOG_FORMAT,
"datefmt": "%Y-%m-%d %H:%M:%S",
},
}
handlers = {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
}
loggers = {
LOGGER_NAME: {"handlers": ["default"], "level": LOG_LEVEL},
}
@lru_cache()
def get_settings():
return Settings()
| 23.980392
| 67
| 0.566639
|
3f49a7af81b0de05d996f67113e185b655e98d3a
| 664
|
py
|
Python
|
src/generic_info_provider.py
|
Tknika/iombian-status-display
|
98e0cbab234de0c88fc279a1621d4c7a39a6e103
|
[
"Apache-2.0"
] | null | null | null |
src/generic_info_provider.py
|
Tknika/iombian-status-display
|
98e0cbab234de0c88fc279a1621d4c7a39a6e103
|
[
"Apache-2.0"
] | null | null | null |
src/generic_info_provider.py
|
Tknika/iombian-status-display
|
98e0cbab234de0c88fc279a1621d4c7a39a6e103
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import logging
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class GenericInfoProvider(ABC):
def __init__(self):
self.has_changed = False
def __setattr__(self, name, value):
if name == "has_changed":
super().__setattr__(name, value)
else:
if getattr(self, name, None) != value:
self.has_changed = True
super().__setattr__(name, value)
@abstractmethod
def update(self):
changed = self.has_changed
self.has_changed = False
return changed
@abstractmethod
def to_list(self):
pass
| 22.133333
| 50
| 0.612952
|
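A minimal sketch of how the `__setattr__` override above feeds the `has_changed` flag, using a hypothetical subclass that is not part of the repository.
# Hypothetical provider, for illustration only.
class HostnameInfoProvider(GenericInfoProvider):
    def __init__(self):
        super().__init__()
        self.hostname = "unknown"
    def update(self):
        self.hostname = "iombian-01"  # re-assigning the same value would not flip the flag
        return super().update()       # True the first time, False until the value changes again
    def to_list(self):
        return [self.hostname]
provider = HostnameInfoProvider()
print(provider.update())  # True: hostname changed from "unknown"
print(provider.update())  # False: nothing changed this cycle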
122605386fc62b5e8949ce38a707ec35f4590848
| 219
|
py
|
Python
|
tests/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-30T10:51:49.000Z
|
2021-08-30T10:51:49.000Z
|
from stripe import api_key
from stripe.resource import convert_to_stripe_object
def convert_to_fake_stripe_object(response):
return convert_to_stripe_object(resp=response, api_key=api_key, account="test_account")
| 31.285714
| 91
| 0.849315
|
8f2cefe597d992f54a5f7332979fa041763d6ccc
| 252
|
py
|
Python
|
manage.py
|
sdzharkov/Trip-Split
|
e485a17383737b53eec1878650eea60567ae78a9
|
[
"MIT"
] | 3
|
2017-03-05T03:24:18.000Z
|
2017-04-22T11:25:07.000Z
|
manage.py
|
sdzharkov/Trip-Split
|
e485a17383737b53eec1878650eea60567ae78a9
|
[
"MIT"
] | null | null | null |
manage.py
|
sdzharkov/Trip-Split
|
e485a17383737b53eec1878650eea60567ae78a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tripSlice.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.909091
| 73
| 0.77381
|
583342465ceaa7f1239189e258d50209af5bfe92
| 775
|
py
|
Python
|
form_designer/utils.py
|
sergey-romanov/django-form-designer
|
cddfb5c87759f57c68208509327078bd690295b9
|
[
"BSD-3-Clause"
] | null | null | null |
form_designer/utils.py
|
sergey-romanov/django-form-designer
|
cddfb5c87759f57c68208509327078bd690295b9
|
[
"BSD-3-Clause"
] | null | null | null |
form_designer/utils.py
|
sergey-romanov/django-form-designer
|
cddfb5c87759f57c68208509327078bd690295b9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
def get_class(import_path):
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a Python path." % import_path)
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
    except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, classname))
| 36.904762
| 75
| 0.610323
|
7347cd40a916c2fe03044512669cebe0998f430f
| 3,768
|
py
|
Python
|
doc/integrations/pytorch/parlai/crowdsourcing/utils/worlds.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2020-09-27T05:00:06.000Z
|
2020-09-27T05:00:06.000Z
|
doc/integrations/pytorch/parlai/crowdsourcing/utils/worlds.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-08-04T11:17:39.000Z
|
2021-08-04T11:17:39.000Z
|
doc/integrations/pytorch/parlai/crowdsourcing/utils/worlds.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-05-03T13:27:14.000Z
|
2021-05-03T13:27:14.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.worlds import World
class CrowdDataWorld(World):
def prep_save_data(self, workers):
"""
This prepares data to be saved for later review, including chats from individual
worker perspectives.
"""
custom_data = self.get_custom_task_data()
save_data = {'custom_data': custom_data, 'worker_data': {}}
return save_data
def get_custom_task_data(self):
"""
This function should take the contents of whatever was collected during this
        task that should be saved and return it in some format, preferably a dict
containing acts.
If you need some extraordinary data storage that this doesn't cover, you can
extend the ParlAIChatBlueprint and write your own ParlAIChatAgentState that
defines the behavior you want.
"""
# return {
# 'acts': [self.important_turn1, self.important_turn2]
# 'context': self.some_context_data_of_importance
# }
pass
class CrowdOnboardWorld(CrowdDataWorld):
"""
Generic world for onboarding a Turker and collecting information from them.
"""
def __init__(self, opt, agent):
"""
Init should set up resources for running the onboarding world.
"""
self.agent = agent
self.episodeDone = False
def parley(self):
"""
A parley should represent one turn of your onboarding task.
"""
self.episodeDone = True
def episode_done(self):
return self.episodeDone
def shutdown(self):
"""
Clear up resources needed for this world.
"""
pass
class CrowdTaskWorld(CrowdDataWorld):
"""
Generic world for Crowd tasks.
"""
def __init__(self, opt, agent):
"""
Init should set up resources for running the task world.
"""
self.agent = agent
self.episodeDone = False
def parley(self):
"""
A parley should represent one turn of your task.
"""
self.episodeDone = True
def episode_done(self):
"""
A ParlAI-Mephisto task ends and allows workers to be marked complete when the
world is finished.
"""
return self.episodeDone
def shutdown(self):
"""
Should be used to free the world's resources and shut down the agents.
"""
self.agent.shutdown()
def review_work(self):
"""
Programmatically approve/reject this work. Doing this now (if possible) means
that you don't need to do the work of reviewing later on.
For example:
.. code-block:: python
mephisto_agent = self.agent.mephisto_agent
if self.response == '0':
mephisto_agent.reject_work(
                    'You rated our model\'s response as a 0/10 but we '
'know we\'re better than that'
)
else:
if self.response == '10':
mephisto_agent.pay_bonus(1, 'Thanks for a great rating!')
mephisto_agent.approve_work()
"""
# mephisto_agent = self.agent.mephisto_agent
# mephisto_agent.approve_work()
# mephisto_agent.reject_work()
# mephisto_agent.pay_bonus(1000) # Pay $1000 as bonus
# mephisto_agent.block_worker() # Block this worker from future work
pass
| 31.4
| 89
| 0.586783
|
f18ef2a950ba73b423d3005a5b48ab7f6018859a
| 4,835
|
py
|
Python
|
src/communication/azext_communication/vendored_sdks/communication/aio/operations/_operations.py
|
saisankargochhayat/azure-cli-extensions
|
f89443a766961f984977a2cf1b682973fcb61edd
|
[
"MIT"
] | null | null | null |
src/communication/azext_communication/vendored_sdks/communication/aio/operations/_operations.py
|
saisankargochhayat/azure-cli-extensions
|
f89443a766961f984977a2cf1b682973fcb61edd
|
[
"MIT"
] | null | null | null |
src/communication/azext_communication/vendored_sdks/communication/aio/operations/_operations.py
|
saisankargochhayat/azure-cli-extensions
|
f89443a766961f984977a2cf1b682973fcb61edd
|
[
"MIT"
] | 1
|
2022-02-14T21:43:29.000Z
|
2022-02-14T21:43:29.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~communication_service_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""List Operations.
Lists all of the available REST API operations of the Microsoft.Communication provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~communication_service_management_client.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-20"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Communication/operations'} # type: ignore
| 44.357798
| 133
| 0.664529
|
650ce5c176d1cacd803ff7a9ee340149a5bab7dc
| 2,605
|
py
|
Python
|
utils/manager.py
|
TheGoofBall64/Kurisu
|
a7a7162b36da97ec8404acb2fd39380f84f270df
|
[
"Apache-2.0"
] | null | null | null |
utils/manager.py
|
TheGoofBall64/Kurisu
|
a7a7162b36da97ec8404acb2fd39380f84f270df
|
[
"Apache-2.0"
] | null | null | null |
utils/manager.py
|
TheGoofBall64/Kurisu
|
a7a7162b36da97ec8404acb2fd39380f84f270df
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, List
from utils.models import FilteredWord, ApprovedInvite
import re
class WordFilterManager:
def __init__(self):
self.kinds = ('piracy tool', 'piracy video', 'piracy tool alert', 'drama', 'unbanning tool', 'piracy site')
self.filter = {}
self.word_exp = {}
async def load(self):
for kind in self.kinds:
self.filter[kind] = []
for entry in await self.fetch_by_kind(kind=kind):
self.filter[kind].append(entry.word)
self.word_exp[entry.word] = re.compile(r"[ *_\-~]*".join(list(entry.word)))
print("Loaded word filter")
async def add(self, word: str, kind: str) -> FilteredWord:
entry = await FilteredWord.create(word=word, kind=kind)
await self.load()
return entry
@staticmethod
async def fetch_by_kind(kind: str) -> List[FilteredWord]:
return await FilteredWord.query.where(FilteredWord.kind == kind).gino.all()
@staticmethod
async def fetch_word(word: str) -> Optional[FilteredWord]:
return await FilteredWord.get(word)
async def delete(self, word: str) -> Optional[FilteredWord]:
entry = await self.fetch_word(word)
if entry:
await entry.delete()
self.filter[entry.kind].remove(entry.word)
del self.word_exp[entry.word]
return entry
class InviteFilterManager:
def __init__(self):
self.invites = []
async def load(self):
self.invites.clear()
self.invites = await self.fetch_all()
async def add(self, code: str, alias: str, uses: int) -> ApprovedInvite:
entry = await ApprovedInvite.create(code=code, uses=uses, alias=alias)
self.invites.append(entry)
return entry
@staticmethod
async def fetch_all() -> List[ApprovedInvite]:
return await ApprovedInvite.query.gino.all()
@staticmethod
async def fetch_invite_by_alias(alias) -> Optional[ApprovedInvite]:
return await ApprovedInvite.query.where(ApprovedInvite.alias == alias).gino.first()
@staticmethod
async def fetch_invite_by_code(code) -> Optional[ApprovedInvite]:
return await ApprovedInvite.get(code)
async def set_uses(self, code: str, uses: int):
invite = await ApprovedInvite.get(code)
await invite.update(uses=uses).apply()
await self.load()
async def delete(self, code: str):
entry = await self.fetch_invite_by_code(code)
if entry:
await entry.delete()
await self.load()
return entry
| 32.974684
| 115
| 0.639923
|
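A standalone sketch of the pattern built in `WordFilterManager.load` above: joining the letters of a filtered word with `[ *_\-~]*` lets the compiled regex match the word even when filler characters are inserted between letters. The word is an invented example.
import re
word = "piracy"  # invented example, not from any real filter list
pattern = re.compile(r"[ *_\-~]*".join(list(word)))
print(bool(pattern.search("p_i r*a-c~y")))  # True: fillers between letters still match
print(bool(pattern.search("privacy")))      # False: extra letters break the sequence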
001330d73f75c6c9bdba1de408cecd3d53786a56
| 4,066
|
py
|
Python
|
rbackup/struct/hierarchy.py
|
etorres4/rbackup
|
b4f7478a4b71ad4b444f83ab5aeae1ed22315370
|
[
"MIT"
] | null | null | null |
rbackup/struct/hierarchy.py
|
etorres4/rbackup
|
b4f7478a4b71ad4b444f83ab5aeae1ed22315370
|
[
"MIT"
] | null | null | null |
rbackup/struct/hierarchy.py
|
etorres4/rbackup
|
b4f7478a4b71ad4b444f83ab5aeae1ed22315370
|
[
"MIT"
] | null | null | null |
"""
.. moduleauthor:: Eric Torres
.. module:: rbackup.struct.hierarchy
:synopsis: Related functionality for creating the backup hierarchy.
"""
import json
import logging
import os
import shutil
from pathlib import Path
# ========== Logging Setup ===========
syslog = logging.getLogger(__name__)
# ========== Constants ==========
DIRMODE = 0o755
FILEMODE = 0o644
METADATA_READ = "r"
METADATA_WRITE = "w"
# ========== Classes ==========
class Hierarchy(os.PathLike):
"""A general class for organizing a hierarchy of data.
**Implementation Details**
* Data for all ``Hierarchy`` objects and subclassed objects use ``JSON`` for serialization
* ``Hierarchy`` objects create their directories upon instantiation
* Reading metadata without the proper permissions may result in a ``PermissionError``
"""
def __init__(self, dest):
"""Default constructor for the ``Hierarchy`` class.
:param dest: the root directory of the backup hierarchy
:type dest: str or path-like object
:raises PermissionError: if process does not have permission to write at dest
"""
self._path = Path(dest)
self._metadata_path = self._path / ".metadata"
self._name = self._path.name
self.path.mkdir(DIRMODE, parents=True, exist_ok=True)
def __eq__(self, other):
return self._path == other.path
def __fspath__(self):
return str(self._path)
def __hash__(self):
# We want to use this Hierarchy's path because it never changes
return hash(self._path)
def __ne__(self, other):
return self._path != other.path
def __repr__(self):
"""Return a string representation of this Hierarchy."""
return f"{self.__class__.__name__}('{self._path}')"
def __str__(self):
"""Return a string representation of this Hierarchy."""
return str(self._path)
def _gen_metadata(self):
"""Generate metadata for this repository.
After this method is called, the data necessary for this hierarchy has been created.
"""
raise NotImplementedError("This method must be called in a child class.")
@property
def path(self):
"""
:return: the base directory of this hierarchy
:rtype: path-like object
"""
return self._path
@property
def name(self):
"""
:return: the name of this hierarchy.
:rtype: str
"""
return self._name
@property
def metadata_path(self):
"""
:return: the path of this hierarchy's metadata file.
:rtype: path-like object
"""
return self._metadata_path
def cleanup(self):
"""Clean up this Hierarchy's data from the filesystem."""
syslog.info("Performing cleanup on %s", self._path)
# We don't want to risk symlink attacks
# noinspection PyUnresolvedReferences
if not shutil.rmtree.avoids_symlink_attacks:
syslog.warning(
"shutil cannot avoid symlink attacks on this platform. Ignoring."
)
return
shutil.rmtree(self)
def read_metadata(self):
"""Read this repository's metadata from its file and
then return it.
:rtype: type that the data is serialized as
"""
syslog.debug("Reading metadata from %s", self.metadata_path)
with self.metadata_path.open(mode=METADATA_READ) as mfile:
return json.load(mfile)
def write_metadata(self, attr):
"""Write this repository's metadata to its metadata file.
.. note:: This write operation is atomic from the perspective of the caller
:param attr: class data to write to file
:type attr: any type
"""
syslog.debug("Writing metadata to %s", self.metadata_path)
tmpfile = self.metadata_path.with_suffix(".tmp")
with tmpfile.open(mode=METADATA_WRITE) as mfile:
json.dump(attr, mfile)
tmpfile.rename(self.metadata_path)
| 28.633803
| 94
| 0.627644
|
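A standalone sketch of the write-to-temporary-file-then-rename pattern that `Hierarchy.write_metadata` above relies on; on POSIX filesystems the final rename atomically replaces the old file, so a concurrent reader never observes a half-written metadata file. The path and payload are illustrative.
import json
from pathlib import Path
def write_json_atomically(path: Path, data) -> None:
    tmp = path.with_suffix(".tmp")
    with tmp.open("w") as mfile:
        json.dump(data, mfile)
    tmp.rename(path)  # atomic replacement of the previous file on POSIX
write_json_atomically(Path("/tmp/metadata.json"), {"snapshots": []})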
c64a07c30e7f0d5a25ffbf3d10ed98031996762a
| 1,064
|
py
|
Python
|
accounts/views.py
|
Arifa96/farmerhand
|
11af1d1ae42192df7344622bbcbcfcec625a886c
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
Arifa96/farmerhand
|
11af1d1ae42192df7344622bbcbcfcec625a886c
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
Arifa96/farmerhand
|
11af1d1ae42192df7344622bbcbcfcec625a886c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import (
authenticate,
get_user_model,
login,
logout,
)
from django.shortcuts import render
from .forms import UserLoginForm, UserRegisterForm
# Create your views here.
def login_view(request):
title = "Login"
print(request.user.is_authenticated)
form = UserLoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get("username")
        password = form.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        login(request, user)
print(request.user.is_authenticated)
return render(request, "form.html", {"form":form, "title": title})
def register_view(request):
title = "Register"
form = UserRegisterForm(request.POST or None)
context = {
"form": form,
"title": title
}
return render(request, "form.html", context)
def logout_view(request):
logout(request)
return render(request, "form.html", {})
def view_post(request):
return render(request,"post.html")
| 23.644444
| 70
| 0.672932
|
73719ac48368d239ddf078ecd7ab9dbb58fb21d4
| 1,442
|
py
|
Python
|
mars/optimization/physical/tests/test_cupy.py
|
ConanoutlooklvTBS/mars
|
7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/optimization/physical/tests/test_cupy.py
|
ConanoutlooklvTBS/mars
|
7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/optimization/physical/tests/test_cupy.py
|
ConanoutlooklvTBS/mars
|
7030566fd9e9fc02b6b4064ef7bd86f6c24a2f60
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .... import tensor as mt
from ....core import enter_mode, TileableGraph, \
TileableGraphBuilder, ChunkGraphBuilder
from ..cupy import CupyRuntimeOptimizer
@enter_mode(build=True)
def test_cupy():
t1 = mt.ones((100, 50), chunk_size=50, gpu=True)
t2 = mt.ones(50, chunk_size=50, gpu=True)
t = (t1 - t2) / mt.sqrt(t2 * (1 - t2) * len(t2))
graph = TileableGraph([t.data])
next(TileableGraphBuilder(graph).build())
context = dict()
chunk_graph_builder = ChunkGraphBuilder(graph,
fuse_enabled=False,
tile_context=context)
chunk_graph = next(chunk_graph_builder.build())
CupyRuntimeOptimizer(chunk_graph).optimize()
assert any(n.op.__class__.__name__ == 'TensorCpFuseChunk'
for n in chunk_graph)
| 37.947368
| 74
| 0.680999
|
3aa4a34e921942b8dac987b3c95974499f6c74a7
| 8,530
|
py
|
Python
|
pgmpy/tests/test_estimators/test_HillClimbSearch.py
|
yzh211/pgmpy
|
f3abe04abb75db9f51f333ecf9429a8700477b55
|
[
"MIT"
] | 1
|
2022-03-28T13:52:31.000Z
|
2022-03-28T13:52:31.000Z
|
pgmpy/tests/test_estimators/test_HillClimbSearch.py
|
yzh211/pgmpy
|
f3abe04abb75db9f51f333ecf9429a8700477b55
|
[
"MIT"
] | null | null | null |
pgmpy/tests/test_estimators/test_HillClimbSearch.py
|
yzh211/pgmpy
|
f3abe04abb75db9f51f333ecf9429a8700477b55
|
[
"MIT"
] | null | null | null |
import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import HillClimbSearch, K2Score
from pgmpy.models import BayesianModel
class TestHillClimbEstimator(unittest.TestCase):
def setUp(self):
self.rand_data = pd.DataFrame(
np.random.randint(0, 5, size=(int(1e4), 2)), columns=list("AB")
)
self.rand_data["C"] = self.rand_data["B"]
self.est_rand = HillClimbSearch(self.rand_data)
self.score_rand = K2Score(self.rand_data).local_score
self.model1 = BayesianModel()
self.model1.add_nodes_from(["A", "B", "C"])
self.model1_possible_edges = set(
[(u, v) for u in self.model1.nodes() for v in self.model1.nodes()]
)
self.model2 = self.model1.copy()
self.model2.add_edge("A", "B")
self.model2_possible_edges = set(
[(u, v) for u in self.model2.nodes() for v in self.model2.nodes()]
)
# link to dataset: "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data1 = self.titanic_data[
["Survived", "Sex", "Pclass", "Age", "Embarked"]
]
self.est_titanic1 = HillClimbSearch(self.titanic_data1)
self.score_titanic1 = K2Score(self.titanic_data1).local_score
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
self.est_titanic2 = HillClimbSearch(self.titanic_data2)
self.score_titanic2 = K2Score(self.titanic_data2).local_score
def test_legal_operations(self):
model2_legal_ops = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set(),
white_list=self.model2_possible_edges,
fixed_edges=set(),
)
)
model2_legal_ops_ref = [
(("+", ("C", "A")), -28.15602208305154),
(("+", ("A", "C")), -28.155467430966382),
(("+", ("C", "B")), 7636.947544933631),
(("+", ("B", "C")), 7937.805375579936),
(("-", ("A", "B")), 28.155467430966382),
(("flip", ("A", "B")), -0.0005546520851567038),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops]),
set([op for op, score in model2_legal_ops_ref]),
)
def test_legal_operations_blacklist_whitelist(self):
model2_legal_ops_bl = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set([("A", "B"), ("A", "C"), ("C", "A"), ("C", "B")]),
white_list=self.model2_possible_edges,
fixed_edges=set(),
)
)
model2_legal_ops_bl_ref = [
("+", ("B", "C")),
("-", ("A", "B")),
("flip", ("A", "B")),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops_bl]), set(model2_legal_ops_bl_ref)
)
model2_legal_ops_wl = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set(),
white_list=set([("A", "B"), ("A", "C"), ("C", "A"), ("A", "B")]),
fixed_edges=set(),
)
)
model2_legal_ops_wl_ref = [
("+", ("A", "C")),
("+", ("C", "A")),
("-", ("A", "B")),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops_wl]), set(model2_legal_ops_wl_ref)
)
def test_legal_operations_titanic(self):
start_model = BayesianModel(
[("Survived", "Sex"), ("Pclass", "Age"), ("Pclass", "Embarked")]
)
all_possible_edges = set(
[(u, v) for u in start_model.nodes() for v in start_model.nodes()]
)
legal_ops = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
tabu_list=[],
max_indegree=float("inf"),
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops)), 20)
tabu_list = [
("-", ("Survived", "Sex")),
("-", ("Survived", "Pclass")),
("flip", ("Age", "Pclass")),
]
legal_ops_tabu = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
tabu_list=tabu_list,
max_indegree=float("inf"),
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops_tabu)), 18)
legal_ops_indegree = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
tabu_list=[],
max_indegree=1,
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops_indegree)), 11)
legal_ops_both = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
tabu_list=tabu_list,
max_indegree=1,
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
legal_ops_both_ref = {
("+", ("Embarked", "Survived")): 10.050632580087495,
("+", ("Survived", "Pclass")): 41.8886804654893,
("+", ("Age", "Survived")): -23.635716036430722,
("+", ("Pclass", "Survived")): 41.81314459373152,
("+", ("Sex", "Pclass")): 4.772261678791324,
("-", ("Pclass", "Age")): 11.546515590730905,
("-", ("Pclass", "Embarked")): -32.17148283253266,
("flip", ("Pclass", "Embarked")): 3.3563814191275583,
("flip", ("Survived", "Sex")): 0.0397370279797542,
}
self.assertSetEqual(
set([op for op, score in legal_ops_both]), set(legal_ops_both_ref)
)
for op, score in legal_ops_both:
self.assertAlmostEqual(score, legal_ops_both_ref[op])
def test_estimate_rand(self):
est1 = self.est_rand.estimate()
self.assertSetEqual(set(est1.nodes()), set(["A", "B", "C"]))
self.assertTrue(
list(est1.edges()) == [("B", "C")] or list(est1.edges()) == [("C", "B")]
)
est2 = self.est_rand.estimate(start_dag=BayesianModel([("A", "B"), ("A", "C")]))
self.assertTrue(
list(est2.edges()) == [("B", "C")] or list(est2.edges()) == [("C", "B")]
)
est3 = self.est_rand.estimate(fixed_edges=[("B", "C")])
self.assertTrue([("B", "C")] == list(est3.edges()))
def test_estimate_titanic(self):
self.assertSetEqual(
set(self.est_titanic2.estimate().edges()),
set([("Survived", "Pclass"), ("Sex", "Pclass"), ("Sex", "Survived")]),
)
self.assertTrue(
("Pclass", "Survived")
in self.est_titanic2.estimate(fixed_edges=[("Pclass", "Survived")]).edges()
)
def test_no_legal_operation(self):
data = pd.DataFrame(
[
[1, 0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 1, 0, 0],
],
columns=list("ABCDEFGHI"),
)
est = HillClimbSearch(data)
best_model = est.estimate(
fixed_edges=[("A", "B"), ("B", "C")], white_list=[("F", "C")]
)
self.assertEqual(
set(best_model.edges()), set([("A", "B"), ("B", "C"), ("F", "C")])
)
def tearDown(self):
del self.rand_data
del self.est_rand
del self.model1
del self.titanic_data
del self.titanic_data1
del self.titanic_data2
del self.est_titanic1
del self.est_titanic2
| 35.991561
| 88
| 0.515358
|
200f74abbd7b0543284b6b6ef2903efed8f310f9
| 346
|
py
|
Python
|
backend/server.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
backend/server.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
backend/server.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, Response, jsonify, redirect, render_template
# from flask_cors import CORS
app = Flask(__name__)
# CORS(app)
@app.route("/", methods=['GET', 'POST'])
def landing():
return render_template("index.html")
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=4000,
debug=True
)
| 21.625
| 78
| 0.632948
|
7d74c51327549286c18e4969e54c91e434205178
| 9,771
|
py
|
Python
|
deepcpg/data/utils.py
|
cangermueller/deepcpg2
|
7f58da5423121168edabb27202c234df0f0e460d
|
[
"MIT"
] | 151
|
2016-05-30T07:05:11.000Z
|
2022-02-28T08:23:55.000Z
|
deepcpg/data/utils.py
|
cangermueller/deepcpg2
|
7f58da5423121168edabb27202c234df0f0e460d
|
[
"MIT"
] | 41
|
2016-08-02T17:18:00.000Z
|
2021-05-31T10:15:18.000Z
|
deepcpg/data/utils.py
|
cangermueller/deepcpg2
|
7f58da5423121168edabb27202c234df0f0e460d
|
[
"MIT"
] | 70
|
2016-05-31T14:25:14.000Z
|
2021-08-30T02:28:33.000Z
|
"""General purpose IO functions."""
from __future__ import division
from __future__ import print_function
import gzip
import threading
import re
import h5py as h5
import numpy as np
import pandas as pd
import six
from six.moves import range
from . import hdf
# Constant for missing labels.
CPG_NAN = -1
# Constant for separating output names, e.g. 'cpg/cell'.
OUTPUT_SEP = '/'
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.it)
def next(self):
return self.__next__()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe."""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
def add_to_dict(src, dst):
"""Add `dict `src` to `dict` `dst`
Adds values in `dict` `src` to `dict` `dst` with same keys but values are
lists of added values. lists of values in `dst` can be stacked with
:func:`stack_dict`. Used for example in `dpcg_eval.py` to stack dicts from
different batches.
Example
-------
src = dict()
src['a'] = 1
src['b'] = {'b1': 10}
dst = dict()
add_to_dict(src, dst)
add_to_dict(src, dst)
-> dst['a'] = [1, 1]
-> dst['b'] = {'b1': [10, 10]}
"""
for key, value in six.iteritems(src):
if isinstance(value, dict):
if key not in dst:
dst[key] = dict()
add_to_dict(value, dst[key])
else:
if key not in dst:
dst[key] = []
dst[key].append(value)
def stack_dict(data):
"""Stacks lists of numpy arrays in `dict` `data`."""
sdata = dict()
for key, value in six.iteritems(data):
if isinstance(value, dict):
sdata[key] = stack_dict(value)
else:
fun = np.vstack if value[0].ndim > 1 else np.hstack
sdata[key] = fun(value)
return sdata
def get_nb_sample(data_files, nb_max=None, batch_size=None):
"""Count number of samples in all `data_files`.
Parameters
----------
data_files: list
`list` with file name of DeepCpG data files.
nb_max: int
If defined, stop counting if that number is reached.
batch_size: int
If defined, return the largest multiple of `batch_size` that is smaller
or equal than the actual number of samples.
Returns
-------
int
Number of samples in `data_files`.
"""
nb_sample = 0
for data_file in data_files:
data_file = h5.File(data_file, 'r')
nb_sample += len(data_file['pos'])
data_file.close()
if nb_max and nb_sample > nb_max:
nb_sample = nb_max
break
if batch_size:
nb_sample = (nb_sample // batch_size) * batch_size
return nb_sample
def get_dna_wlen(data_file, max_len=None):
"""Return length of DNA sequence windows stored in `data_file`."""
data_file = h5.File(data_file, 'r')
wlen = data_file['/inputs/dna'].shape[1]
if max_len:
wlen = min(max_len, wlen)
return wlen
def get_cpg_wlen(data_file, max_len=None):
"""Return number of CpG neighbors stored in `data_file`."""
data_file = h5.File(data_file, 'r')
group = data_file['/inputs/cpg']
wlen = group['%s/dist' % list(group.keys())[0]].shape[1]
if max_len:
wlen = min(max_len, wlen)
return wlen
def get_output_names(data_file, *args, **kwargs):
"""Return name of outputs stored in `data_file`."""
return hdf.ls(data_file, 'outputs',
recursive=True,
groups=False,
*args, **kwargs)
def get_replicate_names(data_file, *args, **kwargs):
"""Return name of replicates stored in `data_file`."""
return hdf.ls(data_file, 'inputs/cpg',
recursive=False,
groups=True,
must_exist=False,
*args, **kwargs)
def get_anno_names(data_file, *args, **kwargs):
"""Return name of annotations stored in `data_file`."""
return hdf.ls(data_file, 'inputs/annos',
recursive=False,
*args, **kwargs)
def is_bedgraph(filename):
"""Test if `filename` is a bedGraph file.
bedGraph files are assumed to start with 'track type=bedGraph'
"""
if isinstance(filename, str):
with open(filename) as f:
line = f.readline()
else:
pos = filename.tell()
line = filename.readline()
if isinstance(line, bytes):
line = line.decode()
filename.seek(pos)
return re.match(r'track\s+type=bedGraph', line) is not None
def format_chromo(chromo):
"""Format chromosome name.
Makes name upper case, e.g. 'mt' -> 'MT' and removes 'chr',
e.g. 'chr1' -> '1'.
"""
return chromo.str.upper().str.replace('^CHR', '')
def sample_from_chromo(frame, nb_sample):
"""Randomly sample `nb_sample` samples from each chromosome.
Samples `nb_sample` records from :class:`pandas.DataFrame` which must
contain a column with name 'chromo'.
"""
def sample_frame(frame):
if len(frame) <= nb_sample:
return frame
idx = np.random.choice(len(frame), nb_sample, replace=False)
return frame.iloc[idx]
frame = frame.groupby('chromo', as_index=False).apply(sample_frame)
frame.index = range(len(frame))
return frame
def is_binary(values):
"""Check if values in array `values` are binary, i.e. zero or one."""
return ~np.any((values > 0) & (values < 1))
def read_cpg_profile(filename, chromos=None, nb_sample=None, round=False,
sort=True, nb_sample_chromo=None):
"""Read CpG profile from TSV or bedGraph file.
Reads CpG profile from either tab delimited file with columns
`chromo`, `pos`, `value`. `value` or bedGraph file. `value` columns contains
methylation states, which can be binary or continuous.
Parameters
----------
    filename: str
Path of file.
chromos: list
List of formatted chromosomes to be read, e.g. ['1', 'X'].
nb_sample: int
Maximum number of sample in total.
round: bool
If `True`, round methylation states in column 'value' to zero or one.
sort: bool
If `True`, sort by rows by chromosome and position.
nb_sample_chromo: int
Maximum number of sample per chromosome.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with columns `chromo`, `pos`, `value`.
"""
if is_bedgraph(filename):
usecols = [0, 1, 3]
skiprows = 1
else:
usecols = [0, 1, 2]
skiprows = 0
dtype = {usecols[0]: np.str, usecols[1]: np.int32, usecols[2]: np.float32}
nrows = None
if chromos is None and nb_sample_chromo is None:
nrows = nb_sample
d = pd.read_table(filename, header=None, comment='#', nrows=nrows,
usecols=usecols, dtype=dtype, skiprows=skiprows)
d.columns = ['chromo', 'pos', 'value']
if np.any((d['value'] < 0) | (d['value'] > 1)):
raise ValueError('Methylation values must be between 0 and 1!')
d['chromo'] = format_chromo(d['chromo'])
if chromos is not None:
if not isinstance(chromos, list):
chromos = [str(chromos)]
d = d.loc[d.chromo.isin(chromos)]
if len(d) == 0:
raise ValueError('No data available for selected chromosomes!')
if nb_sample_chromo is not None:
d = sample_from_chromo(d, nb_sample_chromo)
if nb_sample is not None:
d = d.iloc[:nb_sample]
if sort:
d.sort_values(['chromo', 'pos'], inplace=True)
if round:
d['value'] = np.round(d.value)
if is_binary(d['value']):
d['value'] = d['value'].astype(np.int8)
return d
class GzipFile(object):
"""Wrapper to read and write gzip-compressed files.
If `filename` ends with `gz`, opens file with gzip package, otherwise
builtin `open` function.
Parameters
----------
filename: str
Path of file
mode: str
File access mode
*args: list
Unnamed arguments passed to open function.
**kwargs: dict
Named arguments passed to open function.
"""
def __init__(self, filename, mode='r', *args, **kwargs):
self.is_gzip = filename.endswith('.gz')
if self.is_gzip:
self.fh = gzip.open(filename, mode, *args, **kwargs)
else:
self.fh = open(filename, mode, *args, **kwargs)
def __iter__(self):
return self.fh.__iter__()
def __next__(self):
return self.fh.__next__()
def read(self, *args, **kwargs):
return self.fh.read(*args, **kwargs)
def readline(self, *args, **kwargs):
return self.fh.readline(*args, **kwargs)
def readlines(self, *args, **kwargs):
return self.fh.readlines(*args, **kwargs)
def write(self, data):
if self.is_gzip and isinstance(data, str):
data = data.encode()
self.fh.write(data)
def writelines(self, *args, **kwargs):
self.fh.writelines(*args, **kwargs)
def tell(self, *args, **kwargs):
return self.fh.tell(*args, **kwargs)
def seek(self, *args, **kwargs):
self.fh.seek(*args, **kwargs)
def closed(self):
return self.fh.closed()
def close(self):
self.fh.close()
def __iter__(self):
self.fh.__iter__()
def iter(self):
self.fh.iter()
| 28.570175
| 80
| 0.599427
|
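A minimal sketch of the `threadsafe_generator` decorator defined above, using only the standard library; the generator and worker are invented for illustration.
import threading
@threadsafe_generator
def counter(n):
    for i in range(n):
        yield i
gen = counter(1000)
seen = []
def consume():
    for value in gen:  # each next() call is serialized by the wrapper's lock
        seen.append(value)
threads = [threading.Thread(target=consume) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(len(seen))  # 1000: every value is delivered to exactly one worker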
c72f7873635f865befc7c662bcf1cb51e4e35887
| 1,273
|
py
|
Python
|
LanguageIdentification/AccuracyOnSyntheticCiphers/Scripts/aggResults.py
|
abramhindle/dorabella-experiments
|
2b7fce05964c184f048db75f6ead80bf81a15d97
|
[
"CC0-1.0"
] | null | null | null |
LanguageIdentification/AccuracyOnSyntheticCiphers/Scripts/aggResults.py
|
abramhindle/dorabella-experiments
|
2b7fce05964c184f048db75f6ead80bf81a15d97
|
[
"CC0-1.0"
] | 1
|
2021-03-08T22:07:52.000Z
|
2021-03-08T22:07:52.000Z
|
LanguageIdentification/AccuracyOnSyntheticCiphers/Scripts/aggResults.py
|
abramhindle/dorabella-experiments
|
2b7fce05964c184f048db75f6ead80bf81a15d97
|
[
"CC0-1.0"
] | 1
|
2021-05-14T16:02:28.000Z
|
2021-05-14T16:02:28.000Z
|
uni2K = open("../Results/uni2KResults.txt").read().replace("\n", " ").split()
uni87 = open("../Results/uni87Results.txt").read().replace("\n", " ").split()
trial2K = open("../Results/trial2KResults.txt").read().replace("\n", " ").split()
trialNS2K = open("../Results/trialNS2KResults.txt").read().replace("\n", " ").split()
trial87 = open("../Results/trial87Results.txt").read().replace("\n", " ").split()
trialNS87 = open("../Results/trialNS87Results.txt").read().replace("\n", " ").split()
print("Method", "Length", "Spaces", "MRR", "AvgR", sep="\t", end="\n---------------------------------------\n")
def aggResults(method, length, spaces, data):
MRR = []
AvgRank = []
Total = []
for c, x in enumerate(data):
x = float(x)
if c%3 == 0:
MRR.append(x)
elif c%3 == 1:
AvgRank.append(x)
elif c%3 == 2:
Total.append(x)
print(method, length, spaces, round(sum(MRR)/sum(Total), 2), round(sum(AvgRank)/sum(Total), 2), sep="\t")
aggResults("UNIGRAM", "2000", "No", uni2K)
aggResults("UNIGRAM", "87", "No", uni87)
aggResults("TRIAL", "2000", "Yes", trial2K)
aggResults("TRIAL", "2000", "No", trialNS2K)
aggResults("TRIAL", "87", "Yes", trial87)
aggResults("TRIAL", "87", "No", trialNS87)
| 39.78125
| 111
| 0.567164
|
44d3fe45b80f6a5691dd02b30e0cb97b2a8a0aef
| 2,231
|
py
|
Python
|
dependencies/logger.py
|
4-FLOSS-Free-Libre-Open-Source-Software/dns_warmer
|
1caa74db77b377746973cc94a539928a462ad057
|
[
"MIT"
] | null | null | null |
dependencies/logger.py
|
4-FLOSS-Free-Libre-Open-Source-Software/dns_warmer
|
1caa74db77b377746973cc94a539928a462ad057
|
[
"MIT"
] | null | null | null |
dependencies/logger.py
|
4-FLOSS-Free-Libre-Open-Source-Software/dns_warmer
|
1caa74db77b377746973cc94a539928a462ad057
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import time
# define BLACK "\033[30m" /* Black */
# define RED "\033[31m" /* Red */
# define GREEN "\033[32m" /* Green */
# define YELLOW "\033[33m" /* Yellow */
# define BLUE "\033[34m" /* Blue */
# define MAGENTA "\033[35m" /* Magenta */
# define CYAN "\033[36m" /* Cyan */
# define WHITE "\033[37m" /* White */
# define BOLDBLACK "\033[1m\033[30m" /* Bold Black */
# define BOLDRED "\033[1m\033[31m" /* Bold Red */
# define BOLDGREEN "\033[1m\033[32m" /* Bold Green */
# define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */
# define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */
# define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */
# define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */
# define BOLDWHITE "\033[1m\033[37m" /* Bold White */
class bcolors:
HEADER = '\033[1m\033[37m'
OKBLUE = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
def dump(msg, msg_type):
try:
msg = str(msg)
except:
pass
try:
msg = msg.decode("utf8", errors="ignore")
except:
pass
dts = time.strftime("%B %d, %Y - %H:%M")
dts_colored = "%s[ %s ]: %s" %(bcolors.HEADER, dts, bcolors.ENDC)
try:
if msg_type == "critical":
print(dts_colored + bcolors.FAIL + msg + bcolors.ENDC)
if msg_type == "warning":
print(dts_colored + bcolors.WARNING + msg + bcolors.ENDC)
if msg_type == "error":
print(dts_colored + bcolors.FAIL + msg + bcolors.ENDC)
if msg_type == "info":
print(dts_colored + bcolors.OKBLUE + msg + bcolors.ENDC)
if msg_type == "good":
print(dts_colored + bcolors.OKGREEN + msg + bcolors.ENDC)
if msg_type == "debug":
print(dts_colored + bcolors.HEADER + msg + bcolors.ENDC)
except:
try:
print(sys.exc_info())
except:
pass
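# Example usage (illustrative):
#   dump("service started", "good")      # timestamped message printed in green
#   dump("disk nearly full", "warning")  # timestamped message printed in yellow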
| 28.602564
| 69
| 0.524429
|
1ce4fb43805e982d07bc4a5c87c51875b7584b08
| 1,715
|
py
|
Python
|
activations.py
|
srinathos/swa
|
147a7e8aa780952a2ac680919661b71740b96833
|
[
"BSD-2-Clause"
] | null | null | null |
activations.py
|
srinathos/swa
|
147a7e8aa780952a2ac680919661b71740b96833
|
[
"BSD-2-Clause"
] | null | null | null |
activations.py
|
srinathos/swa
|
147a7e8aa780952a2ac680919661b71740b96833
|
[
"BSD-2-Clause"
] | null | null | null |
import torch
import torch.nn.functional as F
def chabanne_2(x):
# x = x/2
return 0.1992 + 0.5002 * x + 0.1997 * x ** 2
def chabanne_3(x):
return 0.1995 + 0.5002 * x + 0.1994 * x ** 2 - 0.0164 * x ** 3
def chabanne_4(x):
return 0.1500 + 0.5012 * x + 0.2981 * x ** 2 - 0.0004 * x ** 3 - 0.0388 * x ** 4
def chabanne_5(x):
return 0.1488 + 0.4993 * x + 0.3007 * x ** 2 + 0.0003 * x ** 3 - 0.0168 * x ** 5
def chabanne_6(x):
return 0.1249 + 0.5000 * x + 0.3729 * x ** 2 - 0.0410 * x ** 4 + 0.0016 * x ** 6
def d3_v1_pol(x):
return 0.7 * x ** 3 + 0.8 * x ** 2 + 0.2 * x
def d3_v2_pol(x):
return -0.4 * x ** 3 + 0.5 * x ** 2 + 0.9 * x
def softplus_integral(x):
return -0.0005 * x ** 4 + 0.0000 * x ** 3 + 0.0815 * x ** 2 + 0.5000 * x + 0
softplus = torch.nn.Softplus()
def custom_softplus(x):
return x - softplus(x)
def hesam_sigmoid_integral(x):
return -(x * (225234375 * x ** 3 + 443 * x ** 2 - 843750000000000 * x - 937500000000000000)) / 1875000000000000000
def bounded_step_activation(x):
return torch.abs(x) * 0.5
def rectified_polynomial(x):
# Rectifying all negative values first
x = x.clamp(min=0)
return d3_v2_pol(x)
def swish(x, beta=1):
return x * F.sigmoid(beta * x)
def periodic_cos(x):
return torch.cos(x) - x
def periodic_cos_mod(x):
return torch.cos(0.2 * x) - (0.2 * x)
def softplus_polynomial(x):
    return (-8.043291176102489 * 10 ** -14 * x ** 9
            - 5.409176004846577 * 10 ** -11 * x ** 8
            + 1.464006789445581 * 10 ** -10 * x ** 7
            + 1.2094736421337893 * 10 ** -7 * x ** 6
            - 8.68650047151514 * 10 ** -8 * x ** 5
            - 9.849521136327391 * 10 ** -5 * x ** 4
            + 1.8543655255840298 * 10 ** -5 * x ** 3
            + 0.045459999581864446 * x ** 2
            + 0.4989722694638288 * x
            + 1.1980867140213445)
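# Minimal usage sketch (illustrative, not part of the original module):
#   x = torch.linspace(-2.0, 2.0, steps=5)
#   y = chabanne_2(x)       # elementwise low-degree polynomial approximation
#   s = swish(x, beta=1)    # x * sigmoid(x)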
| 23.819444
| 300
| 0.596501
|
7268a4c4c39f2f13c69b11e2318ea4020359a96d
| 322
|
py
|
Python
|
Algorithm_python/Question4.py
|
AtahanKocc/Algorithm_Challenge
|
ea6eea8a5860e4424ac29c3485c058a372e43921
|
[
"MIT"
] | null | null | null |
Algorithm_python/Question4.py
|
AtahanKocc/Algorithm_Challenge
|
ea6eea8a5860e4424ac29c3485c058a372e43921
|
[
"MIT"
] | null | null | null |
Algorithm_python/Question4.py
|
AtahanKocc/Algorithm_Challenge
|
ea6eea8a5860e4424ac29c3485c058a372e43921
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'staircase' function below.
#
# The function accepts INTEGER n as parameter.
#
def staircase(n):
for i in range(1,n+1):
print(f'{"#"*i:>{n}}')
if __name__ == '__main__':
n = int(input().strip())
staircase(n)
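# For n = 4 the loop prints a right-aligned staircase, one row per iteration:
#    #
#   ##
#  ###
# ####
# (the f-string width specifier ':>{n}' pads each row of '#' characters to width n)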
| 14
| 46
| 0.63354
|
78003c9ca8fd019c5847388a004fb9848b1914e4
| 4,322
|
py
|
Python
|
conans/test/unittests/model/version_test.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 6,205
|
2015-12-01T13:40:05.000Z
|
2022-03-31T07:30:25.000Z
|
conans/test/unittests/model/version_test.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 8,747
|
2015-12-01T16:28:48.000Z
|
2022-03-31T23:34:53.000Z
|
conans/test/unittests/model/version_test.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 961
|
2015-12-01T16:56:43.000Z
|
2022-03-31T13:50:52.000Z
|
import unittest
from conans.model.version import Version
class VersionTest(unittest.TestCase):
def test_simple(self):
v1 = Version("1.2.3")
self.assertTrue(v1 == "1.2.3")
self.assertTrue(v1 > "1.1")
self.assertTrue(v1 > None)
self.assertTrue(v1 < "1.11")
self.assertTrue(v1 > "1.2")
self.assertTrue(v1 > "1.2.2.2")
self.assertTrue(v1 < "1.2.3.2")
self.assertEqual(v1.major(), "1.Y.Z") # 1.X.Y
self.assertEqual(v1.minor(), "1.2.Z") # 1.2.Y
self.assertEqual(v1.patch(), "1.2.3")
self.assertEqual(v1.pre(), "1.2.3")
self.assertEqual(v1.build, "")
self.assertTrue(v1.compatible("1.X"))
self.assertTrue(v1.compatible("1.2.Y"))
self.assertFalse(v1.compatible("0.X"))
self.assertFalse(v1.compatible("1.2.2"))
v2 = v1.minor()
self.assertTrue(v2.compatible("1.X"))
self.assertTrue(v2.compatible("1.2.3.4"))
self.assertFalse(v2.compatible("1.3.3.4"))
self.assertTrue(v2.major().compatible("1.3.3.4"))
v1 = Version("1.2.rc1")
self.assertTrue(v1 < "1.2.0")
self.assertFalse(v1 < "1.1.9")
self.assertTrue(Version("1.2.1-dev") < Version("1.2.1"))
self.assertTrue(Version("1.2.1-dev") < Version("1.2.2"))
self.assertTrue(Version("1.2.1-dev") < Version("1.3"))
self.assertTrue(Version("1.2.1-dev") < Version("1.3-alpha"))
self.assertTrue(Version("1.2.1-dev") > Version("1.2.0"))
self.assertTrue(Version("1.2.1-dev") > Version("1.2"))
self.assertTrue(Version("1.2.1-dev") > Version("1.2.alpha"))
self.assertTrue(Version("1.2.1-dev") > Version("1.2-alpha"))
self.assertFalse(Version("4") < Version("4.0.0"))
self.assertFalse(Version("4") > Version("4.0.0"))
self.assertFalse(Version("4") != Version("4.0.0"))
self.assertTrue(Version("4") == Version("4.0.0"))
self.assertTrue(Version("4") <= Version("4.0.0"))
self.assertTrue(Version("4") >= Version("4.0.0"))
self.assertTrue(Version("4.0") == Version("4.0.0"))
self.assertTrue(Version("4.0.0") == Version("4.0.0"))
self.assertTrue(Version("4.0.1") != "4")
self.assertFalse(Version("4.0.0.1") == "4")
self.assertTrue(Version("4.0.0.1") >= "4")
def test_build_metadata_is_not_equal(self):
# https://github.com/conan-io/conan/issues/5900
self.assertNotEqual(Version("4.0.0+abc"), Version("4.0.0+xyz"))
# Shouldn't be an "official" order for build metadata, but as they cannot be equal
# the order is alphabetic
self.assertTrue(Version("4.0.0+abc") > Version("4.0.0+xyz"))
self.assertTrue(Version("4.0.0+xyz") < Version("4.0.0+abc"))
def test_text(self):
v1 = Version("master+build2")
self.assertEqual(v1.major(), "master")
self.assertEqual(v1.minor(), "master")
self.assertEqual(v1.patch(), "master")
self.assertEqual(v1.pre(), "master")
self.assertEqual(v1.build, "build2")
self.assertEqual(v1.stable(), "master")
def test_patch(self):
v1 = Version("1.2.3-alpha1+build2")
self.assertEqual(v1.major(), "1.Y.Z")
self.assertEqual(v1.minor(), "1.2.Z")
self.assertEqual(v1.patch(), "1.2.3")
self.assertEqual(v1.pre(), "1.2.3-alpha1")
self.assertEqual(v1.build, "build2")
self.assertEqual(v1.stable(), "1.Y.Z")
v1 = Version("1.2.3+build2")
self.assertEqual(v1.major(), "1.Y.Z")
self.assertEqual(v1.minor(), "1.2.Z")
self.assertEqual(v1.patch(), "1.2.3")
self.assertEqual(v1.pre(), "1.2.3")
self.assertEqual(v1.build, "build2")
self.assertEqual(v1.stable(), "1.Y.Z")
v1 = Version("0.2.3-alpha1+build2")
self.assertEqual(v1.major(), "0.Y.Z")
self.assertEqual(v1.minor(), "0.2.Z")
self.assertEqual(v1.patch(), "0.2.3")
self.assertEqual(v1.pre(), "0.2.3-alpha1")
self.assertEqual(v1.build, "build2")
self.assertEqual(v1.stable(), "0.2.3-alpha1+build2")
def test_build(self):
v1 = Version("0.2.3-alpha1+build2")
self.assertEqual(v1.build, "build2")
v2 = Version("0.2.3+b178")
self.assertEqual(v2.build, "b178")
| 41.161905
| 90
| 0.573577
|
3390a99b624e5f8f142bdcb3f89684cf296326b0
| 6,360
|
py
|
Python
|
Machine Learning Scientist with Python Track/16. Introduction to Deep Learning with Keras/ch3_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | 7
|
2020-09-14T00:26:19.000Z
|
2022-02-08T20:53:52.000Z
|
Machine Learning Scientist with Python Track/16. Introduction to Deep Learning with Keras/ch3_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | null | null | null |
Machine Learning Scientist with Python Track/16. Introduction to Deep Learning with Keras/ch3_exercises.py
|
MuhammadAlBarham/datacamp
|
65c180163b1ad235c79d85d4926e586a15a5f78f
|
[
"MIT"
] | 8
|
2021-01-12T15:23:20.000Z
|
2022-03-17T12:06:00.000Z
|
# Exercise_1
# Instantiate a Sequential model
model = Sequential()
# Input and hidden layer with input_shape, 16 neurons, and relu
model.add(Dense(16, input_shape = (64,), activation = 'relu'))
# Output layer with 10 neurons (one per digit) and softmax
model.add(Dense(10, activation = 'softmax'))
# Compile your model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Test if your model works and can process input data
print(model.predict(X_train))
--------------------------------------------------
# Exercise_2
#1
# Train your model for 60 epochs, using X_test and y_test as validation data
history = model.fit(X_test, y_test, epochs=60, validation_data=(X_test, y_test), verbose=0)
# Extract from the history object loss and val_loss to plot the learning curve
plot_loss(history.history['loss'], history.history['val_loss'])
#2
selected_option = 2
--------------------------------------------------
# Exercise_3
for size in training_sizes:
# Get a fraction of training data (we only care about the training data)
X_train_frac, y_train_frac = X_train[:size], y_train[:size]
# Reset the model to the initial weights and train it on the new data fraction
model.set_weights(initial_weights)
model.fit(X_train_frac, y_train_frac, epochs = 50, callbacks = [early_stop])
# Evaluate and store the train fraction and the complete test set results
train_accs.append(model.evaluate(X_train_frac, y_train_frac)[1])
test_accs.append(model.evaluate(X_test, y_test)[1])
# Plot train vs test accuracies
plot_results(train_accs, test_accs)
--------------------------------------------------
# Exercise_4
# Activation functions to try
activations = ['relu', 'leaky_relu', 'sigmoid', 'tanh']
# Loop over the activation functions
activation_results = {}
for act in activations:
# Get a new model with the current activation
model = get_model(act)
# Fit the model
history = model.fit(X_train, y_train, validation_data =(X_test, y_test), epochs =20, verbose =0)
activation_results[act] = history
--------------------------------------------------
# Exercise_5
# Create a dataframe from val_loss_per_function
val_loss= pd.DataFrame(val_loss_per_function)
# Call plot on the dataframe
val_loss.plot()
plt.show()
# Create a dataframe from val_acc_per_function
val_acc = pd.DataFrame(val_acc_per_function)
# Call plot on the dataframe
val_acc.plot()
plt.show()
--------------------------------------------------
# Exercise_6
#1
# Get a fresh new model with get_model
model = get_model()
# Train your model for 5 epochs with a batch size of 1
model.fit(X_train, y_train, epochs=5, batch_size = 1)
print("\n The accuracy when using a batch of size 1 is: ",
model.evaluate(X_test, y_test)[1])
#2
model = get_model()
# Fit your model for 5 epochs with a batch of size the training set
model.fit(X_train, y_train, epochs=5, batch_size= 700)
print("\n The accuracy when using the whole training set as a batch was: ",
model.evaluate(X_test, y_test)[1])
--------------------------------------------------
# Exercise_7
# Import batch normalization from keras layers
from keras.layers import BatchNormalization
# Build your deep network
batchnorm_model = Sequential()
batchnorm_model.add(Dense(50, input_shape=(64,), activation='relu', kernel_initializer='normal'))
batchnorm_model.add(BatchNormalization())
batchnorm_model.add(Dense(50, activation='relu', kernel_initializer='normal'))
batchnorm_model.add(BatchNormalization())
batchnorm_model.add(Dense(50, activation='relu', kernel_initializer='normal'))
batchnorm_model.add(BatchNormalization())
batchnorm_model.add(Dense(10, activation='softmax', kernel_initializer='normal'))
# Compile your model with sgd
batchnorm_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
--------------------------------------------------
# Exercise_8
# Train your standard model, storing its history
history1 = standard_model.fit(X_train, y_train, validation_data=(X_test,y_test), epochs=10, verbose=0)
# Train the batch normalized model you recently built, store its history
history2 = batchnorm_model.fit(X_train, y_train, validation_data=(X_test,y_test), epochs=10, verbose=0)
# Call compare_acc_histories passing in both model histories
compare_histories_acc(history1, history2)
--------------------------------------------------
# Exercise_9
# Creates a model given an activation and learning rate
def create_model(learning_rate=0.01, activation='relu'):
    # Create an Adam optimizer with the given learning rate
    opt = Adam(lr=learning_rate)
    # Create your binary classification model
    model = Sequential()
    model.add(Dense(128, input_shape=(30,), activation=activation))
    model.add(Dense(256, activation=activation))
    model.add(Dense(1, activation='sigmoid'))
    # Compile your model with your optimizer, loss, and metrics
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
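# Illustrative check (assumes the Keras/Adam imports already used in these exercises):
#   clf = create_model(learning_rate=0.001, activation='relu')
#   clf.summary()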
--------------------------------------------------
# Exercise_10
# Import KerasClassifier from keras wrappers
from keras.wrappers.scikit_learn import KerasClassifier
# Create a KerasClassifier
model = KerasClassifier(build_fn = create_model)
# Define the parameters to try out
params = {'activation': ['relu', 'tanh'], 'batch_size': [32, 128, 256],
'epochs': [50, 100, 200], 'learning_rate': [0.1, 0.01, 0.001]}
# Create a randomized search CV object, passing in the parameters to try
random_search = RandomizedSearchCV(model, param_distributions = params, cv = KFold(3))
# Running random_search.fit(X, y) would start the search, but it takes too long!
show_results()
--------------------------------------------------
# Exercise_11
# Import KerasClassifier from keras wrappers
from keras.wrappers.scikit_learn import KerasClassifier
# Create a KerasClassifier
model = KerasClassifier(build_fn = create_model, epochs = 50,
batch_size = 128, verbose = 0)
# Calculate the accuracy score for each fold
kfolds = cross_val_score(model, X, y, cv = 3)
# Print the mean accuracy
print('The mean accuracy was:', kfolds.mean())
# Print the accuracy standard deviation
print('With a standard deviation of:', kfolds.std())
--------------------------------------------------
| 33.650794
| 103
| 0.685377
|
ad715dd013baef48a2715b0c59dbdec0d6395574
| 4,029
|
py
|
Python
|
cards/views.py
|
acdh-oeaw/official-depictions
|
d562c1bef88834ea2cae35ed119b2538573249a3
|
[
"MIT"
] | null | null | null |
cards/views.py
|
acdh-oeaw/official-depictions
|
d562c1bef88834ea2cae35ed119b2538573249a3
|
[
"MIT"
] | 12
|
2020-02-11T21:32:03.000Z
|
2022-03-24T08:13:04.000Z
|
cards/views.py
|
acdh-oeaw/official-depictions
|
d562c1bef88834ea2cae35ed119b2538573249a3
|
[
"MIT"
] | null | null | null |
import django_tables2 as tables
from django.conf import settings
from django_tables2.config import RequestConfig
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse, reverse_lazy
from django.views.generic import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import DeleteView
from browsing.browsing_utils import GenericListView, BaseCreateView, BaseUpdateView
from vocabs.models import SkosConceptScheme, SkosConcept
from . filters import *
from . forms import *
from . tables import *
from . models import Card
class CardCollectionListView(GenericListView):
model = CardCollection
filter_class = CardCollectionListFilter
formhelper_class = CardCollectionFilterFormHelper
table_class = CardCollectionTable
init_columns = [
'abbreviation',
'name',
]
class CardCollectionDetailView(DetailView):
model = CardCollection
template_name = 'cards/cardcollection_detail.html'
class CardCollectionCreate(BaseCreateView):
model = CardCollection
form_class = CardCollectionForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardCollectionCreate, self).dispatch(*args, **kwargs)
class CardCollectionUpdate(BaseUpdateView):
model = CardCollection
form_class = CardCollectionForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardCollectionUpdate, self).dispatch(*args, **kwargs)
class CardCollectionDelete(DeleteView):
model = CardCollection
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('cards:card_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardCollectionDelete, self).dispatch(*args, **kwargs)
class CardListView(GenericListView):
model = Card
filter_class = CardListFilter
formhelper_class = CardFilterFormHelper
table_class = CardTable
init_columns = [
'legacy_id',
'card_collection',
'number',
'thumbnail',
'text_front',
'text_back',
'signature',
]
def get_queryset(self, **kwargs):
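        # Anonymous visitors only see cards flagged as public; authenticated
        # users get the unfiltered queryset.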
user = self.request.user
qs = super(CardListView, self).get_queryset()
if user.is_authenticated:
pass
else:
qs = qs.exclude(public=False)
self.filter = self.filter_class(self.request.GET, queryset=qs)
self.filter.form.helper = self.formhelper_class()
return self.filter.qs
class CardDetailView(DetailView):
model = Card
template_name = 'cards/card_detail.html'
def get_context_data(self, **kwargs):
instance = self.object
context = super(CardDetailView, self).get_context_data(**kwargs)
context['front'] = getattr(instance, 'img_front', None)
context['back'] = getattr(instance, 'img_back', None)
context['openseadragon_js'] = getattr(settings, "APIS_OSD_JS", None)
context['openseadragon_img'] = getattr(settings, "APIS_OSD_IMG_PREFIX", None)
context['iiif_server'] = getattr(settings, "APIS_IIIF_SERVER", None)
return context
class CardCreate(BaseCreateView):
model = Card
form_class = CardForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardCreate, self).dispatch(*args, **kwargs)
class CardUpdate(BaseUpdateView):
model = Card
form_class = CardForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardUpdate, self).dispatch(*args, **kwargs)
class CardDelete(DeleteView):
model = Card
template_name = 'webpage/confirm_delete.html'
success_url = reverse_lazy('cards:card_browse')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CardDelete, self).dispatch(*args, **kwargs)
| 29.625
| 85
| 0.712584
|
752cc5e4590e72fa1f1b6f1983f52ad7c7d2ec74
| 848
|
py
|
Python
|
stack/0503_next_greater_element_2.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
stack/0503_next_greater_element_2.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
stack/0503_next_greater_element_2.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
class Solution:
def nextGreaterElements(self, nums: list) -> list:
if not nums:
return []
monotonic_stack = [(nums[0], 0)]
nums = nums + nums
next_greater = {}
for i, num in enumerate(nums[1:], 1):
while monotonic_stack:
if num > monotonic_stack[-1][0]:
val, idx = monotonic_stack.pop()
next_greater[(val, idx)] = num
else:
break
if i < len(nums) // 2:
monotonic_stack.append((num, i))
res = [-1] * (len(nums) // 2)
for i, n in enumerate(nums[:len(nums) // 2]):
if (n, i) in next_greater:
res[i] = next_greater[(n, i)]
return res
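# Worked example: nums = [1, 2, 1] -> [2, -1, 2]. Doubling the array lets the
# trailing 1 "wrap around" and find the 2 that occurs earlier in circular order,
# while the monotonic stack only keeps indices from the first copy.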
| 30.285714
| 54
| 0.415094
|
fa507e43700dfc0078958bec68c623b0aebfea2b
| 13,662
|
py
|
Python
|
src/eatery_db/campus_eatery.py
|
cuappdev/eatery-backend
|
564dbdfe790e844e41a3d2e0ed993c07cc68f2ac
|
[
"MIT"
] | 3
|
2016-02-28T15:45:18.000Z
|
2016-02-29T21:13:54.000Z
|
src/eatery_db/campus_eatery.py
|
cuappdev/eatery-backend
|
564dbdfe790e844e41a3d2e0ed993c07cc68f2ac
|
[
"MIT"
] | 90
|
2016-02-23T23:57:50.000Z
|
2021-09-20T04:23:23.000Z
|
src/eatery_db/campus_eatery.py
|
cuappdev/eatery-backend
|
564dbdfe790e844e41a3d2e0ed993c07cc68f2ac
|
[
"MIT"
] | 4
|
2016-02-23T04:41:58.000Z
|
2020-12-15T22:03:15.000Z
|
from datetime import datetime, timedelta
import requests
from .common_eatery import format_time, get_image_url, parse_coordinates, string_to_date_range
from ..constants import (
NUM_DAYS_STORED_IN_DB,
PAY_METHODS,
STATIC_MENUS_URL,
STATIC_ATTRIBUTES_URL,
TRILLIUM_SLUG,
WEEKDAYS,
get_today,
)
from ..database import CampusEatery, CampusEateryHour, MenuCategory, MenuItem
def parse_campus_eateries(data_json):
"""Parses a Cornell Dining json dictionary.
Returns a list of CampusEatery objects containing on campus dining options.
Args:
data_json (dict): a valid dictionary from the Cornell Dining json
"""
campus_eateries = []
attributes_json = requests.get(STATIC_ATTRIBUTES_URL).json()
for eatery in data_json["data"]["eateries"]:
brbs, cash, cornell_card, credit, mobile, swipes = parse_payments(eatery["payMethods"])
phone = eatery.get("contactPhone", "N/A")
phone = phone if phone else "N/A"
latitude, longitude = parse_coordinates(eatery)
eatery_attributes = attributes_json.get(eatery.get("slug", ""), {})
eatery_exceptions = ";;".join(eatery_attributes.get("exceptions", []))
reserve_url = eatery_attributes.get("reserve_url")
is_get = eatery_attributes.get("is_get", False)
new_eatery = CampusEatery(
about=eatery.get("about", ""),
campus_area_desc=parse_campus_area(eatery),
eatery_type=parse_eatery_type(eatery),
image_url=get_image_url(eatery.get("slug", "")),
latitude=latitude,
location=eatery.get("location", ""),
longitude=longitude,
name=eatery.get("name", ""),
name_short=eatery.get("nameshort", ""),
payment_method_brbs=brbs,
payment_method_cash=cash,
payment_method_cornell_card=cornell_card,
payment_method_credit=credit,
payment_method_mobile=mobile,
payment_method_swipes=swipes,
phone=phone,
slug=eatery.get("slug", ""),
exceptions=eatery_exceptions,
reserve_url=reserve_url,
is_get=is_get,
)
campus_eateries.append(new_eatery)
return campus_eateries
def parse_campus_hours(data_json, eatery_model):
"""Parses a Cornell Dining json dictionary.
Returns 1) a list of tuples of CampusEateryHour objects for a corresponding CampusEatery object and their unparsed
menu 2) an array of the items an eatery serves.
Args:
data_json (dict): a valid dictionary from the Cornell Dining json
eatery_model (CampusEatery): the CampusEatery object to which to link the hours.
"""
eatery_hours_and_menus = []
dining_items = []
for eatery in data_json["data"]["eateries"]:
eatery_slug = eatery.get("slug", "")
if eatery_model.slug == eatery_slug:
dining_items = get_trillium_menu() if eatery_slug == TRILLIUM_SLUG else parse_dining_items(eatery)
hours_list = eatery["operatingHours"]
for hours in hours_list:
new_date = hours.get("date", "")
hours_events = hours["events"]
if hours_events:
for event in hours_events:
start, end = format_time(event.get("start", ""), event.get("end", ""), new_date)
eatery_hour = CampusEateryHour(
eatery_id=eatery_model.id,
date=new_date,
event_description=event.get("descr", ""),
event_summary=event.get("calSummary", ""),
end_time=end,
start_time=start,
)
eatery_hours_and_menus.append((eatery_hour, event.get("menu", [])))
else:
eatery_hour = CampusEateryHour(
eatery_id=eatery_model.id,
date=new_date,
event_description=None,
event_summary=None,
end_time=None,
start_time=None,
)
eatery_hours_and_menus.append((eatery_hour, []))
return eatery_hours_and_menus, dining_items
def parse_menu_categories(menu_json, hour_model, eatery_id):
"""Parses the menu portion of the Cornell Dining json dictionary.
Returns a tuple of a MenuCategory object linked to the provided CampusHours object and the unparsed items of that
MenuCategory.
Args:
menu_json (dict): a valid dictionary from the Cornell Dining json
hours_model (CampusHours): the CampusHours object to which to link the menu.
"""
if not hour_model:
return [(MenuCategory(event_id=None, eatery_id=eatery_id, category=""), menu_json)]
categories_and_items = []
for menu in menu_json:
if menu.get("category"):
new_category = MenuCategory(event_id=hour_model.id, eatery_id=eatery_id, category=menu.get("category"))
categories_and_items.append((new_category, menu.get("items", [])))
elif menu.get("item"):
return [(MenuCategory(event_id=hour_model.id, eatery_id=eatery_id, category=""), menu_json)]
return categories_and_items
def parse_menu_items(items_json, category_model):
"""
Parses the items portion of the Cornell Dining json dictionary.
Returns a list of MenuItems corresponding to a MenuCategory object.
Args:
items_json (dict): a valid dictionary from the Cornell Dining json
category_model (MenuCategory): the MenuCategory object to which to link the menu.
"""
items = []
for item in items_json:
new_item = MenuItem(
category_id=category_model.id, healthy=item.get("healthy", False), item=item.get("item", "")
)
items.append(new_item)
return items
def parse_static_eateries(static_json):
"""Parses a static dining json dictionary.
Similar to the parse_eatery function except for static source.
Args:
static_json (dict): A valid dictionary in the format of the dynamic Cornell Dining json for static eateries
"""
static_eateries = []
attributes_json = requests.get(STATIC_ATTRIBUTES_URL).json()
for eatery in static_json["eateries"]:
brbs, cash, cornell_card, credit, mobile, swipes = parse_payments(eatery["payMethods"])
latitude, longitude = parse_coordinates(eatery)
eatery_attributes = attributes_json.get(eatery.get("slug", ""), {})
eatery_exceptions = ";;".join(eatery_attributes.get("exceptions", []))
new_eatery = CampusEatery(
about=eatery.get("about", ""),
campus_area_desc=parse_campus_area(eatery),
eatery_type=parse_eatery_type(eatery),
image_url=get_image_url(eatery.get("slug", "")),
latitude=latitude,
location=eatery.get("location", ""),
longitude=longitude,
name=eatery.get("name", ""),
name_short=eatery.get("nameshort", ""),
payment_method_brbs=brbs,
payment_method_cash=cash,
payment_method_cornell_card=cornell_card,
payment_method_credit=credit,
payment_method_mobile=mobile,
payment_method_swipes=swipes,
phone=eatery.get("contactPhone", "N/A"),
slug=eatery.get("slug", ""),
exceptions=eatery_exceptions,
reserve_url=eatery_attributes.get("reserve_url"),
is_get=eatery_attributes.get("is_get", False),
)
static_eateries.append(new_eatery)
return static_eateries
def parse_static_op_hours(data_json, eatery_model):
"""Parses a Cornell Dining json dictionary.
Returns a list of tuples of CampusEateryHour objects for a corresponding CampusEatery object and their unparsed
menu.
Args:
data_json (dict): a valid dictionary from the Cornell Dining json
eatery_model (CampusEatery): the CampusEatery object to which to link the hours.
"""
today = get_today()
for eatery in data_json["eateries"]:
if eatery_model.slug == eatery.get("slug", ""):
weekdays = {}
hours_list = eatery.get("operatingHours", [])
dining_items = eatery.get("diningItems")
dates_closed = eatery.get("datesClosed", [])
for hours in hours_list:
if "-" in hours["weekday"]:
start, end = hours["weekday"].split("-")
start_index = WEEKDAYS[start]
end_index = WEEKDAYS[end]
days = []
idx = start_index
while idx != end_index:
days.append(idx)
idx = (idx + 1) % 7
days.append(end_index)
else:
days = [WEEKDAYS[hours["weekday"]]]
for weekday in days:
if weekday not in weekdays:
weekdays[weekday] = hours["events"]
new_operating_hours = []
for i in range(NUM_DAYS_STORED_IN_DB):
new_date = today + timedelta(days=i)
for dates in dates_closed: # check if dates_closed contains new_date
if "-" in dates: # indicates this string is a date range of form "mm/dd/yy-mm/dd/yy"
start_date, end_date = string_to_date_range(dates)
if start_date <= new_date <= end_date:
break
else: # date string is a singular date
closed_date = datetime.strptime(dates, "%m/%d/%y").date()
if new_date == closed_date:
break
else:
# new_date is not present in dates_closed, we can add this date to the db
new_events = weekdays.get(new_date.weekday(), [])
for event in new_events:
start, end = format_time(event.get("start", ""), event.get("end", ""), new_date.isoformat())
new_operating_hours.append(
(
CampusEateryHour(
eatery_id=eatery_model.id,
date=new_date.isoformat(),
event_description=event.get("descr", ""),
event_summary=event.get("calSummary", ""),
end_time=end,
start_time=start,
),
dining_items,
)
)
if not new_events:
new_operating_hours.append(
(CampusEateryHour(eatery_id=eatery_model.id, date=new_date.isoformat()), dining_items)
)
return new_operating_hours
return []
def parse_campus_area(eatery):
"""Parses the common name location of an eatery.
Returns a string containing a description of an eatery
Args:
eatery (dict): A valid json dictionary from Cornell Dining that contains eatery information
"""
if "campusArea" in eatery:
description_short = eatery["campusArea"]["descrshort"]
return description_short
def parse_eatery_type(eatery):
"""Parses the classification of an eatery.
Returns the type of an eatery (dining hall, cafe, etc).
Args:
eatery (dict): A valid json dictionary from Cornell Dining that contains eatery information
"""
try:
return eatery["eateryTypes"][0]["descr"]
except Exception:
return "Unknown"
def parse_payments(methods):
"""Returns a tuple containing Booleans corresponding to the payment methods are available at an
    eatery. Follows the format <brbs>, <cash>, <cornell_card>, <credit>, <mobile>, <swipes>
Args:
methods (json): a valid json segment for payments from Cornell Dining
"""
brbs = any(pay["descrshort"] == PAY_METHODS["brbs"] for pay in methods)
cash = any(pay["descrshort"] == PAY_METHODS["credit"] for pay in methods)
cornell_card = any(pay["descrshort"] == PAY_METHODS["c-card"] for pay in methods)
credit = any(pay["descrshort"] == PAY_METHODS["credit"] for pay in methods)
mobile = any(pay["descrshort"] == PAY_METHODS["mobile"] for pay in methods)
swipes = any(pay["descrshort"] == PAY_METHODS["swipes"] for pay in methods)
return brbs, cash, cornell_card, credit, mobile, swipes
def parse_dining_items(eatery):
"""Parses the dining items of an eatery.
Returns an array of the items an eatery serves and a flag for healthy
options. Exclusive to non-dining hall eateries.
Args:
eatery (dict): A valid json dictionary from Cornell Dining that contains eatery information
"""
dining_items = []
for item in eatery["diningItems"]:
dining_items.append({"healthy": item.get("healthy", False), "item": item.get("item", "")})
return dining_items
def get_trillium_menu():
"""Gets the Trillium menu.
Returns the Trillium dining items (using parse_dining_items) from the static json source
for menus.
"""
static_json = requests.get(STATIC_MENUS_URL).json()
return parse_dining_items(static_json["Trillium"][0])
| 39.830904
| 118
| 0.597716
|
9f2d28da65fa12e00c89f9d7e0791df302ab8a5e
| 11,801
|
py
|
Python
|
fairseq/modules/dynamic_convolution.py
|
yyeboah/fairseq
|
80e79a03c2b2fdf9889c19c8f4129d379d130e10
|
[
"MIT"
] | 33
|
2021-08-11T12:52:53.000Z
|
2022-03-08T03:03:21.000Z
|
fairseq/modules/dynamic_convolution.py
|
MANGA-UOFA/DSLP
|
a9d3ee154f3bc73b9dfc191ed537ee90b3896956
|
[
"MIT"
] | 10
|
2021-11-14T12:28:48.000Z
|
2022-02-28T14:13:40.000Z
|
fairseq/modules/dynamic_convolution.py
|
MANGA-UOFA/DSLP
|
a9d3ee154f3bc73b9dfc191ed537ee90b3896956
|
[
"MIT"
] | 5
|
2021-11-10T08:55:33.000Z
|
2022-03-14T14:57:48.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from .unfold import unfold1d
def DynamicConv(
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
renorm_padding=False,
bias=False,
conv_bias=False,
query_size=None,
in_proj=False,
):
if torch.cuda.is_available():
try:
from fairseq.modules.dynamicconv_layer import DynamicconvLayer
return DynamicconvLayer(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
renorm_padding=renorm_padding,
bias=bias,
conv_bias=conv_bias,
query_size=query_size,
)
except ImportError as e:
print(e)
return DynamicConv1dTBC(
input_size,
kernel_size=kernel_size,
padding_l=padding_l,
num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax,
renorm_padding=renorm_padding,
bias=bias,
conv_bias=conv_bias,
query_size=query_size,
)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
"""Dynamic lightweight convolution taking T x B x C inputs
Args:
input_size: # of channels of the input
kernel_size: convolution channels
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)
bias: use bias
conv_bias: bias of the convolution
query_size: specified when feeding a different input as the query
in_proj: project the input and generate the filter together
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
"""
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
num_heads=1,
weight_dropout=0.0,
weight_softmax=False,
renorm_padding=False,
bias=False,
conv_bias=False,
query_size=None,
in_proj=False,
):
super().__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.weight_softmax = weight_softmax
self.renorm_padding = renorm_padding
if in_proj:
self.weight_linear = Linear(
self.input_size, self.input_size + num_heads * kernel_size * 1
)
else:
self.weight_linear = Linear(
self.query_size, num_heads * kernel_size * 1, bias=bias
)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
@property
def in_proj(self):
return (
self.weight_linear.out_features
== self.input_size + self.num_heads * self.kernel_size
)
def reset_parameters(self):
self.weight_linear.reset_parameters()
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.0)
def forward(self, x, incremental_state=None, query=None, unfold=None):
"""Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
query: use the specified query to predict the conv filters
"""
unfold = (
x.size(0) > 512 if unfold is None else unfold
) # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None or not self.in_proj
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def _forward_unfolded(self, x, incremental_state, query):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = (
proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)
)
else:
weight = self.weight_linear(query).view(T * B * H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K - 1:
weight = weight.narrow(1, K - T, T)
K, padding_l = T, T - 1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = (
proj.narrow(2, self.input_size, H * K).contiguous().view(T * B * H, -1)
)
else:
weight = self.weight_linear(query).view(T * B * H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf"))
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the sequence length
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def extra_repr(self):
s = "{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}".format(
self.input_size,
self.kernel_size,
self.padding_l,
self.num_heads,
self.weight_softmax,
self.conv_bias is not None,
self.renorm_padding,
self.in_proj,
)
if self.query_size != self.input_size:
s += ", query_size={}".format(self.query_size)
if self.weight_dropout_module.p > 0.0:
s += ", weight_dropout={}".format(self.weight_dropout_module.p)
return s
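# Minimal usage sketch (illustrative, not part of the original module):
#   conv = DynamicConv1dTBC(input_size=64, kernel_size=3, padding_l=2, num_heads=8)
#   x = torch.randn(10, 2, 64)   # T x B x C
#   y = conv(x)                  # output keeps the T x B x C shape: (10, 2, 64)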
| 37.945338
| 132
| 0.603932
|
f311bb451eea86c734dbfd96a08b9a1a2241f4a8
| 696
|
py
|
Python
|
rllib/tests/test_local.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
rllib/tests/test_local.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
rllib/tests/test_local.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer, DEFAULT_CONFIG
from ray.rllib.utils.test_utils import framework_iterator
class LocalModeTest(unittest.TestCase):
def setUp(self) -> None:
ray.init(local_mode=True)
def tearDown(self) -> None:
ray.shutdown()
def test_local(self):
cf = DEFAULT_CONFIG.copy()
cf["model"]["fcnet_hiddens"] = [10]
cf["num_workers"] = 2
for _ in framework_iterator(cf):
agent = PGTrainer(cf, "CartPole-v0")
print(agent.train())
agent.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 23.2
| 57
| 0.62931
|
abd2259bf0f44412b06e8a6ee33ea7b016d14414
| 15,562
|
py
|
Python
|
grumers/apps/data/migrations/0011_auto__del_jellyfishincident__add_dailyreport.py
|
socib/grumers
|
ef0b5f8441cf7b3d5a03da715263003d9f550f2f
|
[
"MIT"
] | 1
|
2018-04-27T04:26:38.000Z
|
2018-04-27T04:26:38.000Z
|
grumers/apps/data/migrations/0011_auto__del_jellyfishincident__add_dailyreport.py
|
socib/grumers
|
ef0b5f8441cf7b3d5a03da715263003d9f550f2f
|
[
"MIT"
] | null | null | null |
grumers/apps/data/migrations/0011_auto__del_jellyfishincident__add_dailyreport.py
|
socib/grumers
|
ef0b5f8441cf7b3d5a03da715263003d9f550f2f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'JellyfishIncident'
db.delete_table(u'data_jellyfishincident')
# Adding model 'DailyReport'
db.create_table(u'data_dailyreport', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_observed', self.gf('django.db.models.fields.DateField')()),
('observation_station', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.ObservationStation'], on_delete=models.PROTECT)),
('source', self.gf('django.db.models.fields.CharField')(default='W', max_length=2)),
('sting_incidents', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('total_incidents', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='created-incident', null=True, to=orm['auth.User'])),
('updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='updated-incident', null=True, to=orm['auth.User'])),
))
db.send_create_signal(u'data', ['DailyReport'])
def backwards(self, orm):
# Adding model 'JellyfishIncident'
db.create_table(u'data_jellyfishincident', (
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='updated-incident', null=True, to=orm['auth.User'], blank=True)),
('date_observed', self.gf('django.db.models.fields.DateField')()),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('total_incidents', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sting_incidents', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
('observation_station', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.ObservationStation'], on_delete=models.PROTECT)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created-incident', null=True, to=orm['auth.User'], blank=True)),
('source', self.gf('django.db.models.fields.CharField')(default='W', max_length=2)),
('updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'data', ['JellyfishIncident'])
# Deleting model 'DailyReport'
db.delete_table(u'data_dailyreport')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'data.dailyreport': {
'Meta': {'ordering': "['-date_observed']", 'object_name': 'DailyReport'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-incident'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_observed': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observation_station': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data.ObservationStation']", 'on_delete': 'models.PROTECT'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'W'", 'max_length': '2'}),
'sting_incidents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'total_incidents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-incident'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'data.flagchange': {
'Meta': {'ordering': "['-date']", 'object_name': 'FlagChange'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-flag'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'flag_status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jellyfish_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'observation_station': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data.ObservationStation']", 'on_delete': 'models.PROTECT'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-flag'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'data.jellyfishobservation': {
'Meta': {'ordering': "['-date_observed']", 'object_name': 'JellyfishObservation'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-observation'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_observed': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jellyfish_specie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data.JellyfishSpecie']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'observation_station': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data.ObservationStation']", 'on_delete': 'models.PROTECT'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'W'", 'max_length': '2'}),
'sting_incidents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'total_incidents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-observation'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'data.jellyfishspecie': {
'Meta': {'ordering': "['order', 'name']", 'object_name': 'JellyfishSpecie'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-specie'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'picture': ('django.db.models.fields.files.ImageField', [], {'default': "'jellyfish_species/no-img.jpg'", 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-specie'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'data.observationroute': {
'Meta': {'ordering': "['name']", 'object_name': 'ObservationRoute'},
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-route'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'municipality': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'route_type': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-route'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_incident_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'data.observationstation': {
'Meta': {'ordering': "['observation_route', 'order']", 'object_name': 'ObservationStation'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created-station'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'observation_route': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['data.ObservationRoute']", 'on_delete': 'models.PROTECT'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'position': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'station_type': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updated-station'", 'null': 'True', 'to': u"orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['data']
| 87.920904
| 195
| 0.588099
|
563e93c653fe74c3db14f903385a8216c8c79c03
| 3,956
|
py
|
Python
|
scripts/replay_action_log.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/replay_action_log.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/replay_action_log.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Replay actions for a given logfile and verify final object pose.
The log file is a JSON file as produced by
`rrc_simulation.TriFingerPlatform.store_action_log()` which contains the
initial state, a list of all applied actions and the final state of the object.
The simulation is initialised according to the given initial pose and the
actions are applied one by one. In the end, it is verified if the final object
pose in the simulation matches the one in the log file.
The accumulated reward is computed based on the given goal pose and printed in
the end.
Both initial and goal pose are given as JSON strings with keys "position" and
"orientation" (as quaternion). Example:
{"position": [-0.03, 0.07, 0.05], "orientation": [0.0, 0.0, 0.68, -0.73]}
"""
import argparse
import json
import sys
import numpy as np
from rrc_simulation import trifinger_platform
from rrc_simulation.tasks import move_cube
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--logfile",
"-l",
required=True,
type=str,
help="Path to the log file.",
)
parser.add_argument(
"--difficulty",
"-d",
required=True,
type=int,
help="The difficulty level of the goal (for reward computation).",
)
parser.add_argument(
"--initial-pose",
"-i",
required=True,
type=str,
metavar="JSON",
help="Initial pose of the cube as JSON string.",
)
parser.add_argument(
"--goal-pose",
"-g",
required=True,
type=str,
metavar="JSON",
help="Goal pose of the cube as JSON string.",
)
args = parser.parse_args()
with open(args.logfile, "r") as fh:
log = json.load(fh)
initial_object_pose = move_cube.Pose.from_json(args.initial_pose)
goal_pose = move_cube.Pose.from_json(args.goal_pose)
platform = trifinger_platform.TriFingerPlatform(
visualization=False, initial_object_pose=initial_object_pose
)
# verify that the number of logged actions matches with the episode length
n_actions = len(log["actions"])
assert (
n_actions == move_cube.episode_length
), "Number of actions in log does not match with expected episode length."
accumulated_reward = 0
for logged_action in log["actions"]:
action = platform.Action()
action.torque = np.array(logged_action["torque"])
action.position = np.array(logged_action["position"])
action.position_kp = np.array(logged_action["position_kp"])
action.position_kd = np.array(logged_action["position_kd"])
t = platform.append_desired_action(action)
cube_pose = platform.get_object_pose(t)
reward = -move_cube.evaluate_state(
goal_pose, cube_pose, args.difficulty
)
accumulated_reward += reward
assert logged_action["t"] == t
cube_pose = platform.get_object_pose(t)
final_pose = log["final_object_pose"]
print("Accumulated Reward:", accumulated_reward)
# verify that actual and logged final object pose match
try:
np.testing.assert_array_almost_equal(
cube_pose.position, final_pose["position"], decimal=3,
err_msg=("Recorded object position does not match with the one"
" achieved by the replay")
)
np.testing.assert_array_almost_equal(
cube_pose.orientation, final_pose["orientation"], decimal=3,
err_msg=("Recorded object orientation does not match with the one"
" achieved by the replay")
)
except AssertionError as e:
print("Failed.", file=sys.stderr)
print(e, file=sys.stderr)
sys.exit(1)
print("Passed.")
if __name__ == "__main__":
main()
| 30.90625
| 79
| 0.654954
|
8766dd1de40c51558de4fecf6601b58c14db2d55
| 4,859
|
py
|
Python
|
51job/demo.py
|
kryane/Python-111
|
281c3b1200b168c820fd5c17d34ec6af5057ef9a
|
[
"Apache-2.0"
] | 19
|
2022-02-07T14:43:48.000Z
|
2022-03-22T08:06:53.000Z
|
51job/demo.py
|
huangtc/Python-111
|
c817762130b73e0af2c1e783605b477d688f6262
|
[
"Apache-2.0"
] | null | null | null |
51job/demo.py
|
huangtc/Python-111
|
c817762130b73e0af2c1e783605b477d688f6262
|
[
"Apache-2.0"
] | 11
|
2022-02-07T13:43:20.000Z
|
2022-03-22T08:06:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Helaoshi
# @Time : 2021/12/28 14:12
# @File : demo.py
# @Project : pythonProject
import pandas as pd
import requests
list1 = []
for page in range(1, 51):
url = f'https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Cookie': '_uab_collina=164067192622033876462843; partner=www_google_com; privacy=1640671920; guid=d3523518dddd8bd7b7965d190ad4301c; acw_tc=2f624a3916406719228003438e3915a063d89bb6e38d817975862124347206; acw_sc__v2=61caaab452c017764723825d655ac1416798b7de; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60000000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FApython%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; ssxmod_itna=7q+xBDRD0APQq0dD=eYOYkGkYGCiDcApxgAPrD05xYeGzDAxn40iDtxodOAD6ef0GIWGWGSw+axTRRDAW4piCpqe/GDB3DEx06fiQDxii9DCeDIDWeDiDG4GmB4GtDpxG=Djjtz1M6xYPDEjKDaxDbDin8pxGCDeKD0xwFDQKDu69DdjK+82Y1W3vqYbDxLxG1F40HGAf4Lxgf6fGzYt8ERBG4DB4XeADxMF85ee73px0kYq0O9ryzCrgUU2y1xSwWYYxeZGi7ocqdGBW5tG2eeG0DwyxbF0=l3DDWVF29eYD===; ssxmod_itna2=7q+xBDRD0APQq0dD=eYOYkGkYGCiDcApxgAgDnKS4xDsGGeDLYOmYNQBAYcfYqApO8lPCIh+4pgEY+k2CWnivpl3bB5RjtmhDrXseiYqEbN1XnbOFcu7XICk8QAaT/kyuXKsF5qqFS4P8OQ5iG3o9EpO9OomIrQim2h7FWIsSAY5GmouF33Ndkj2e6LpKi6r1ltCWwLyKq9dwqAHEc9QhwYpW3qFc3YbT4b+/rE1udeFrDn2e+RpmA5zxr4ODxawcALEvrERGQ8HvWKmH30W=6n20b=L6IDiKsjE6ILEXXCyoXjldM182UDftgCm22pcrL0cN8YwC2YZxeoO658f1O=oZXLEi44ex4QhEeeIUiRPhZwTzt01UXWi5U53jvjERInpxPUzeUFKIB0u92uO95dxogpvnmv5=LmRKzUnT65v9Rl7YdpPWDG2KG4bDr8nDQiHSDGn3Hk2I353MGc00+/he0FgYGKA4F9nwI2kxe5IkgQDDFqD+EDxD===',
'Cookie': '_uab_collina=164067192622033876462843; partner=www_google_com; privacy=1640671920; guid=d3523518dddd8bd7b7965d190ad4301c; acw_tc=2f624a3916406719228003438e3915a063d89bb6e38d817975862124347206; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60000000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60000000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FApython%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; acw_sc__v2=61caad0d39b8445f11c56e6f1470714f76b6e4f4; acw_sc__v2=61caad0d39b8445f11c56e6f1470714f76b6e4f4; ssxmod_itna=iqGxyDRiiQ94cjDz=IdG=pkSUYAPGCmGUYxBqb4iNDnD8x7YDvmz5D0hibxghDvhTVDQqEi3bxmif5BzmSbYpr5Oi4GLDmKDyY+i4toD48KGwD0eG+DD4DWDmeHDnxAQDjxGP9R6sy=Dbxi3jxiaDGeDe2IODY5DhxDC6APDwx0Cj1h4jpAF=FHpurW+djDkD7ypDlcqLbDkrRfLejH+m8SderDDUnL5d0LEhjGGv7KDXhQDv1H1l2apl/FsBoYNqC44qY2tYYKTMDhTNe4rOi24YChThGmB15V4DDP0li+DD===; ssxmod_itna2=iqGxyDRiiQ94cjDz=IdG=pkSUYAPGCmGUD8ueD5m5D/zWDFxoCktm6POz5L1o9G7/DbYFfs2DnRtmM7zWHQwKUn40QT581vrN7cU7idh=xqeO9drMPdnOfo=SQ9PKxoiApy1XxUO=QQi3wzdLgKjLAe8t0SrXjeRf+7kIAAnv1QTYhbdA3rdEjALrGzBbz+g3HACRO4WNGKYmjFAhmlDYebxopD8cj723scS=+wTEq5bNAQ87p=/Wmpt3p7j8f=KA65Qg7tPhL8gB7SzvL4rAaOIb2ZKOPcUtaMpR2YyP+=ASuToeah2B0eM/91eT5eAFRvEKRZnmx9HqKKyK44ii4kegl0ZAWXWWZn4YuYpm54QGrD2e5b335KLIUYimURPcyF9ubWGuFDbQ42U+aSggUoWAw/m4WlwFKeilFb=8i6ER/4TM27Soib8CU8plKNHbAA8W93b4kxK38QxDKde=7xV0YN7D42xB0=6Z5IbUpTeM0DhDDLxD2YGDD==',
'Host': 'search.51job.com',
'Referer': 'https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,15.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
try:
response = requests.get(url, headers=headers).json()
engine_jds = response['engine_jds']
for i in engine_jds:
items = {}
            items['创建时间'] = i['issuedate']          # posting date
            items['地址'] = i['workarea_text']          # work location
            items['岗位'] = i['job_name']               # job title
            items['学历'] = i['attribute_text'][2]      # education requirement
            items['工作经验'] = i['attribute_text'][1]    # required work experience
            items['薪资水平'] = i['providesalary_text']   # salary range
list1.append(items)
print(items)
except Exception as e:
print(e)
df = pd.DataFrame(list1)
df.to_csv('51_job.csv')
| 107.977778
| 1,669
| 0.793167
|
43e2fcd8532bd26154abb2c8be43988739f12e92
| 14,574
|
py
|
Python
|
pyjokes/jokes_it.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | 293
|
2015-08-04T13:59:27.000Z
|
2022-03-12T12:43:33.000Z
|
pyjokes/jokes_it.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | 88
|
2015-07-26T13:08:16.000Z
|
2022-02-10T07:51:28.000Z
|
pyjokes/jokes_it.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | 103
|
2015-08-06T17:27:29.000Z
|
2022-02-28T19:50:29.000Z
|
# -*- coding: utf-8 -*-
"""
Jokes from stackoverflow - provided under CC BY-SA 3.0
http://stackoverflow.com/questions/234075/what-is-your-best-programmer-joke?page=4&tab=votes#tab-top
"""
neutral = [
"Trionfalmente, Beth ha rimosso Python 2.7 dal server nel 2020.'Finalmente!' ha detto con gioia, solo per vedere l'annuncio di Python 4.4.",
"Una query SQL entra in un bar, cammina verso a due table e chiede: 'Posso unirvi?'.",
"Quando il tuo martello e` C ++, tutto inizia a sembrare un pollice.",
"Se metti un milione di scimmie su un milione di tastiere, uno di loro alla fine scrivera` un programma Java, il resto scrivera` Perl.",
"Per comprendere la ricorsione devi prima capire la ricorsione.",
"Gli amici non permettono agli amici di usare Python 2.7.",
"Ho suggerito di tenere un 'Python Object Oriented Programming Seminar', ma l'acronimo era impopolare.",
"'toc, toc'. 'Chi e` la`?' ... pausa molto lunga ... Java.",
"Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, e` un problema hardware.",
"Qual e` il modo orientato agli oggetti per diventare ricchi? Ereditarieta`.",
"Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, possono rendere l'oscurita` uno standard.",
"I vecchi programmatori C non muoiono, sono solo gettati nel void.",
"Gli sviluppatori di software amano risolvere i problemi: se non ci sono problemi facilmente disponibili li creeranno.",
".NET e` stato chiamato .NET in modo che non si visualizzasse in un elenco di directory Unix.",
"Hardware: la parte di un computer che puoi calciare.",
"Un programmatore e` stato trovato morto nella doccia, accanto al corpo c'era uno shampoo con le istruzioni:Insapona, risciacqua ripeti.",
"Ottimista: il bicchiere e` mezzo pieno Pessimista: il bicchiere e` mezzo vuoto Programmatore: il bicchiere e` il doppio del necessario.",
"In C abbiamo dovuto codificare i nostri bug. In C ++ possiamo ereditarli.",
"Come mai non c'e` una gara Perl offuscato? Perche` tutti vincerebbero.",
"Se riproduci un CD di Windows all'indietro, ascolterai il canto satanico ... peggio ancora, se lo riproduci in avanti, installa Windows.",
"Quanti programmatori ci vogliono per uccidere uno scarafaggio? Due: uno tiene, l'altro installa Windows su di esso.",
"Come si chiama un programmatore finlandese? Nerdic.",
"Cosa ha detto il codice Java al codice C? : Non hai classe.",
"Perche` Microsoft ha chiamato il proprio motore di ricerca BING? Because It's Not Google.",
"I venditori di software e i venditori di auto usate si differenziano perche` questi ultimi sanno quando mentono.",
"Bambino: 'papa', perche` il sole sorge ad est e tramonta ad ovest?' Papa': 'figlio, sta funzionando, non toccarlo'.",
"Quanti programmatori Prolog ci vogliono per cambiare una lampadina? Falso.",
"I veri programmatori possono scrivere codice assembly in qualsiasi lingua.",
"Cameriere: 'le piacerebbe un caffe` o un te`?' Programmatore: 'Si'.",
"Un programmatore entra in un foo ...",
"Qual e` il secondo nome di Benoit B. Mandelbrot? Benoit B. Mandelbrot.",
"Perche` sorridi sempre? Questa e` solo la mia ... espressione regolare.",
"Domanda stupida ASCII, ottiene uno stupido ANSI.",
"Un programmatore aveva un problema: penso` tra se stesso: 'lo risolvo con i threads!', ora ha due problemi.",
"Java: scrivi una volta e scappa.",
"Ti direi una battuta su UDP, ma non lo capiresti mai.",
"Un ingegnere di QA entra in un bar, si imbatte in un bar, striscia in un bar, balla in un bar, punta i piedi in un bar...",
"Ho avuto un problema quindi ho pensato di utilizzare Java. Ora ho una ProblemFactory.",
"L'ingegnere del QA entra in un bar, ordina una birra, ordina 0 birre, 99999 birre, una lucertola, -1 birre, uno sfdeljknesv.",
"Un responsabile di prodotto entra in un bar, chiede un drink, il barista dice NO, ma prendera` in considerazione l'aggiunta successiva.",
"Come si genera una stringa casuale? Metti uno studente di Informatica del primo anno davanti a Vim e gli chiedi di salvare ed uscire.",
"Uso Vim da molto tempo ormai, principalmente perche` non riesco a capire come uscire.",
"Come fai a sapere se una persona e` un utente Vim? Non ti preoccupare, te lo diranno.",
"un cameriere urla: 'sta soffocando! Qualcuno e` un dottore?' Programmatore: 'sono un utente Vim'.",
"3 Database Admins sono entrati in un bar NoSQL e poco dopo sono usciti perche` non sono riusciti a trovare un table.",
"Come spiegare il film Inception a un programmatore? Quando esegui una VM dentro una VM dentro un' altra VM tutto procede molto lentamente.",
"Come si chiama un pappagallo che dice 'Squawk! Pezzi di nove! Pezzi di nove!' Un errore a pappagallo.",
"Ci sono solo due problemi difficili in Informatica: invalidazione della cache, denominazione delle cose e errori di off-by-one.",
"Ci sono 10 tipi di persone: quelli che comprendono il binario e quelli che non lo sanno.",
"Ci sono 2 tipi di persone: quelli che possono estrapolare dati da insiemi incompleti ...",
"Esistono II tipi di persone: quelli che comprendono i numeri romani e quelli che non li conoscono.",
"Ci sono 10 tipi di persone: quelli che comprendono l'esadecimale e altri 15.",
"Ci sono 10 tipi di persone: quelli che capiscono il trinario, quelli che non lo fanno e quelli che non ne hanno mai sentito parlare.",
"Come chiami otto hobbit? Un hob byte.",
"La cosa migliore di un booleano e` che anche se ti sbagli, sei solo fuori di un bit.",
"Un buon programmatore e` qualcuno che guarda sempre in entrambe le direzioni prima di attraversare una strada a senso unico.",
"Esistono due modi per scrivere programmi privi di errori: solo il terzo funziona.",
"I controlli di qualita` consistono nel 55% di acqua, 30% di sangue e 15% di ticket in Jira.",
"Quanti QA servono per cambiare una lampadina? Hanno notato che la stanza era buia,: non risolvono i problemi, li trovano.",
"Un programmatore si schianta contro un'auto , l'uomo chiede 'cosa e` successo', l'altro risponde'Non so. Facciamo il backup e riprova'.",
"Scrivere PHP e` come fare pipi` in piscina, tutti lo hanno fatto, ma non hanno bisogno di renderlo pubblico.",
"Numero di giorni da quando ho riscontrato un errore di indice di array: -1.",
"gli appuntamenti veloci sono inutili, 5 minuti non sono sufficienti per spiegare correttamente i benefici della filosofia Unix.",
"Microsoft ha ogni quindici giorni una 'settimana produttiva' dove usa Google invece di Bing.",
"Trovare un buon sviluppatore PHP e` come cercare un ago in un pagliaio o e` un hackstack in un ago?.",
"Unix e` user friendly, e` solo molto particolare nella scelta di chi siano i suoi amici.",
"Un programmatore COBOL guadagna milioni con la riparazione Y2K e decide di congelarsi criogenicamente. L'anno e` 9999.",
"Il linguaggio C combina tutta la potenza del linguaggio assembly con tutta la facilita` d'uso del linguaggio assembly.",
"Un esperto SEO entra in un bar, pub, pub irlandese, taverna, barista, birra, liquore, vino, alcolici, liquori ...",
"Che cosa significa Emacs? Utilizzato esclusivamente dagli scienziati informatici di mezza eta`.",
"Che cosa hanno in comune le battute di PyJokes con Adobe Flash? Si aggiornano sempre, ma non migliorano.",
"Quanti demosceners sono necessari per cambiare una lampadina? Meta`. Con uno intero non ci sono sfide.",
]
"""
Jokes from The Internet Chuck Norris DB (ICNDB) (http://www.icndb.com/) - provided under CC BY-SA 3.0
http://api.icndb.com/jokes/
"""
chuck = [
"Tutti gli array che Chuck Norris dichiara sono di dimensioni infinite, perche` Chuck Norris non conosce limiti.",
"Chuck Norris non ha la latenza del disco perche` il disco rigido sa sbrigarsi, altrimenti sono guai.",
"Chuck Norris scrive codice che si ottimizza da solo.",
"Chuck Norris non puo` testare l'uguaglianza perche` non ha eguali.",
"Chuck Norris non ha bisogno di garbage collection perche` non chiama .Dispose (), chiama .DropKick ().",
"Il primo programma di Chuck Norris e` stato kill -9.",
"Chuck Norris ha scoppiato la bolla delle dot com.",
"Tutti i browser supportano le definizioni esadecimali #chuck e #norris per i colori nero e blu.",
"MySpace non e` proprio il tuo spazio, e` di Chuck (te lo lascia solo usare).",
"Chuck Norris puo` scrivere funzioni infinitamente ricorsive e farle tornare.",
"Chuck Norris puo` risolvere le Torri di Hanoi in una mossa.",
"L'unico modello di design che Chuck Norris conosce e` il God Object Pattern.",
"Chuck Norris ha terminato World of Warcraft.",
"I project manager non chiedono mai a Chuck Norris le stime.",
"Chuck Norris non usa gli standard web in quanto il web si conformera` a lui.",
"'Funziona sulla mia macchina' e` sempre vero per Chuck Norris.",
"Chuck Norris non fa i grafici di Burn Down, fa i grafici di Smack Down.",
"Chuck Norris puo` cancellare il cestino.",
"La barba di Chuck Norris puo` scrivere 140 parole al minuto.",
"Chuck Norris puo` testare tutte le applicazioni con un'unica affermazione, 'funziona'.",
"La tastiera di Chuck Norris non ha un tasto Ctrl perche` niente controlla Chuck Norris.",
"Chuck Norris puo` far traboccare il tuo stack solo guardandolo.",
"Per Chuck Norris, tutto contiene una vulnerabilita`.",
"Chuck Norris non usa sudo, la shell sa solo che e` lui e fa quello che gli viene detto.",
"Chuck Norris non ha bisogno di un debugger, si limita a fissare il codice finche` non confessa.",
"Chuck Norris puo` accedere a metodi privati.",
"Chuck Norris puo` istanziare una classe astratta.",
"L'oggetto classe eredita da Chuck Norris.",
"Chuck Norris conosce l'ultima cifra del Pi greco.",
"La connessione di Chuck Norris e` piu' veloce in up che in down perche` i dati sono incentivati a correre via da lui.",
"Nessuna affermazione puo` prendere la ChuckNorrisException.",
"Chuck Norris puo` scrivere applicazioni multi-thread con un singolo thread.",
"Chuck Norris non ha bisogno di usare AJAX perche` le pagine hanno troppa paura di postback comunque.",
"Chuck Norris non usa la riflessione, la riflessione chiede educatamente il suo aiuto.",
"Non c'e` alcun tasto Esc sulla tastiera di Chuck Norris, perche` nessuno sfugge a Chuck Norris.",
"Chuck Norris puo` eseguire la ricerca binaria di dati non ordinati.",
"Chuck Norris non ha bisogno di tentativi di cattura, le eccezioni sono troppo spaventate da sollevarsi.",
"Chuck Norris e` uscito da un ciclo infinito.",
"Se Chuck Norris scrive codice con bug, gli errori si risolvono da soli.",
"L'hosting di Chuck Norris e` garantito al 101% di uptime.",
"La tastiera di Chuck Norris ha il tasto Any.",
"Chuck Norris puo` accedere al database dall'interfaccia utente.",
"I programmi di Chuck Norris non escono mai, sono terminati.",
"I programmi di Chuck Norris occupano il 150% della CPU, anche quando non sono in esecuzione.",
"Chuck Norris puo` generare thread che si completano prima di essere avviati.",
"I programmi di Chuck Norris non accettano input.",
"Chuck Norris puo` installare iTunes senza installare Quicktime.",
"Chuck Norris non ha bisogno di un sistema operativo.",
"Il modello di rete OSI di Chuck Norris ha un solo livello: fisico.",
"Chuck Norris puo` compilare errori di sintassi.",
"Chuck Norris non ha bisogno di digitare cast. Il Chuck-Norris Compiler (CNC) vede attraverso le cose, fino in fondo sempre.",
"Chuck Norris comprime i suoi file con un calcio rotante sul disco rigido.",
"Con Chuck Norris P = NP. Non c'e` alcun nondeterminismo con le decisioni di Chuck Norris.",
"Chuck Norris puo` recuperare qualsiasi cosa da / dev / null.",
"Nessuno ha mai programmato in coppia con Chuck Norris ed e`vissuto per raccontare la storia.",
"Nessuno ha mai parlato durante la revisione del codice di Chuck Norris ed e` vissuto per raccontare la storia.",
"Chuck Norris non usa una GUI, preferisce la linea di comando.",
"Chuck Norris non usa Oracle, lui e` l'Oracle.",
"Chuck Norris puo` dereferenziare NULL.",
"Una differenza tra il tuo codice e quello di Chuck Norris e` infinita.",
"Il plugin Chuck Norris Eclipse e` diventato un contatto alieno.",
"Chuck Norris e` l'ultimo mutex, tutti i thread lo temono.",
"Non preoccuparti dei test, i test case di Chuck Norris coprono anche il tuo codice.",
"Le dichiarazioni del registro di Chuck Norris sono sempre al livello FATAL.",
"Chuck Norris ha completato World of Warcraft.",
"Quando Chuck Norris rompe la build, non e` possibile risolverla, perche` non c'e` una sola riga di codice.",
"Chuck Norris scrive con un dito, lo punta alla tastiera e la tastiera fa il resto.",
"I programmi di Chuck Norris possono superare il test di Turing fissando l'interrogatore.",
"Se provi kill -9 con i programmi di Chuck Norris, si ritorce contro.",
"Chuck Norris esegue loop infiniti in meno di 4 secondi.",
"Chuck Norris puo` sovrascrivere una variabile bloccata.",
"Chuck Norris conosce il valore di NULL.",
"Chuck Norris puo` installare un sistema operativo a 64 bit su macchine a 32 bit.",
"Chuck Norris puo` scrivere su un flusso di output.",
"Chuck Norris puo` leggere da un flusso di input.",
"Chuck Norris non ha mai scritto il suo programma in codice macchina. Le macchine hanno imparato a interpretare il codice di Chuck Norris.",
"I test unitari di Chuck Norris non girano, muoiono.",
"Chuck Norris causa la schermata blu della morte.",
"Chuck Norris puo` fare una classe che e` sia astratta che finale.",
"Chuck Norris potrebbe usare qualsiasi cosa in java.util.* per ucciderti, inclusi i javadoc.",
"Il codice gira piu` velocemente quando Chuck Norris lo guarda.",
"Chuck Norris non usa REST, lui aspetta.",
"Su Facebook a tutti piace Chuck Norris, che lo scelgano o no.",
"Non puoi seguire Chuck Norris su Twitter, perche` lui ti segue.",
"La calcolatrice di Chuck Norris ha solo 3 tasti: 0, 1 e NAND.",
"Chuck Norris utilizza solo variabili globali. Non ha nulla da nascondere.",
"Chuck Norris scrive direttamente in binario. Quindi scrive il codice sorgente come documentazione per altri programmatori.",
]
jokes_it = {
'neutral': neutral,
'chuck': chuck,
'all': neutral + chuck,
}
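# A minimal usage sketch (illustrative only, not part of pyjokes' public API):
#
#   import random
#   print(random.choice(jokes_it['neutral']))   # one random neutral joke in Italian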
| 79.639344
| 145
| 0.727734
|
7b191347b98ae4c00435b6eecae4ff90355437ef
| 1,412
|
py
|
Python
|
dlk/python/dlk/core/model.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | 1
|
2019-10-09T04:41:02.000Z
|
2019-10-09T04:41:02.000Z
|
dlk/python/dlk/core/model.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | 1
|
2019-02-07T12:20:52.000Z
|
2019-02-08T07:22:48.000Z
|
dlk/python/dlk/core/model.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | 2
|
2019-02-08T10:03:34.000Z
|
2019-03-20T06:25:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Model module. It contains model info, which includes graph inside."""
from core.graph import Graph
class Model(object):
"""Class that represents a model."""
def __init__(self) -> None:
"""Init the model. Currently, just generates a blank graph inside."""
self.__graph: Graph = Graph()
@property
def graph(self) -> Graph:
"""Return the graph in this model."""
return self.__graph
@graph.setter
def graph(self, val: Graph) -> None:
del self.__graph
self.__graph = val
def is_valid_graph(self) -> bool:
"""Return if the graph is a valid one. This is just for testing."""
return self.__graph.check_nodes()
| 35.3
| 79
| 0.646601
|
92ed16359e6dcfb6bab57762fca98b145efab0dc
| 3,380
|
py
|
Python
|
configs/_base_/models/faster_rcnn_r50_fpn.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 295
|
2020-07-16T13:03:29.000Z
|
2022-03-29T05:20:12.000Z
|
configs/_base_/models/faster_rcnn_r50_fpn.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 136
|
2021-07-11T11:26:54.000Z
|
2022-03-31T02:45:34.000Z
|
configs/_base_/models/faster_rcnn_r50_fpn.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 84
|
2021-05-29T06:58:14.000Z
|
2022-03-31T07:44:10.000Z
|
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
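# A minimal usage sketch (assumed file layout, not part of this config): in
# MMDetection-style tooling this base model config is typically composed via
# `_base_` inheritance from a task-specific config, e.g.
#
#   _base_ = ['../_base_/models/faster_rcnn_r50_fpn.py']
#   model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))  # override class count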
| 30.727273
| 77
| 0.532544
|
1282c8a66d1b7100c97d18f6e669ff3b928a2477
| 1,253
|
py
|
Python
|
nextstrain/cli/command/update.py
|
mlarrousse/cli
|
808314eb1ac74231e4500945b0802b235985bf88
|
[
"MIT"
] | 22
|
2019-11-06T01:47:30.000Z
|
2022-03-29T15:39:27.000Z
|
nextstrain/cli/command/update.py
|
mlarrousse/cli
|
808314eb1ac74231e4500945b0802b235985bf88
|
[
"MIT"
] | 141
|
2018-06-29T23:45:45.000Z
|
2022-03-31T21:02:03.000Z
|
nextstrain/cli/command/update.py
|
mlarrousse/cli
|
808314eb1ac74231e4500945b0802b235985bf88
|
[
"MIT"
] | 18
|
2018-07-24T16:33:52.000Z
|
2021-02-16T20:12:11.000Z
|
"""
Updates your local copy of the default container image.
This may take several minutes as the layers of the image are downloaded.
"""
from functools import partial
from ..util import colored, check_for_new_version
from ..runner import all_runners
def register_parser(subparser):
parser = subparser.add_parser("update", help = "Update your local image copy")
return parser
def run(opts):
# Check our own version for updates
newer_version = check_for_new_version()
success = partial(colored, "green")
failure = partial(colored, "red")
notice = partial(colored, "yellow")
statuses = [
runner.update()
for runner in all_runners
]
# Print overall status
all_good = False not in statuses
if all_good:
print()
print(success("Your images are up to date!"))
if newer_version:
print()
print(notice("…but consider upgrading nextstrain-cli too, as noted above."))
else:
print()
print(failure("Updating images failed"))
if newer_version:
print()
print(notice("Maybe upgrading nextstrain-cli, as noted above, will help?"))
# Return a 1 or 0 exit code
return int(not all_good)
| 26.104167
| 88
| 0.652035
|
ccb213fd91b33d2719b671ee32ae3816418d68a5
| 473
|
py
|
Python
|
yatube/posts/admin.py
|
ATIMSRU/yatube_project
|
8d7818c312a2ec311778b1e25b24bbe4715d55ec
|
[
"MIT"
] | null | null | null |
yatube/posts/admin.py
|
ATIMSRU/yatube_project
|
8d7818c312a2ec311778b1e25b24bbe4715d55ec
|
[
"MIT"
] | null | null | null |
yatube/posts/admin.py
|
ATIMSRU/yatube_project
|
8d7818c312a2ec311778b1e25b24bbe4715d55ec
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Group, Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ('pk', 'text', 'pub_date', 'author', 'group')
list_editable = ('group',)
search_fields = ('text',)
list_filter = ('pub_date',)
    # This attribute applies to all columns: where a value is empty, this string is shown instead
empty_value_display = '-пусто-'
admin.site.register(Post, PostAdmin)
admin.site.register(Group)
| 31.533333
| 80
| 0.697674
|
8230c6bf63d64026443f6ad65e1c991017c3119e
| 1,724
|
py
|
Python
|
disc_tool/social_graph.py
|
tomasff/disc
|
e10ab5d36920254ef1ae50cd3f53f65edd1f8ea4
|
[
"MIT"
] | 1
|
2022-03-28T11:20:10.000Z
|
2022-03-28T11:20:10.000Z
|
disc_tool/social_graph.py
|
tomasff/disc
|
e10ab5d36920254ef1ae50cd3f53f65edd1f8ea4
|
[
"MIT"
] | null | null | null |
disc_tool/social_graph.py
|
tomasff/disc
|
e10ab5d36920254ef1ae50cd3f53f65edd1f8ea4
|
[
"MIT"
] | null | null | null |
from enum import Enum, unique, auto
from datetime import datetime
import networkx as nx
@unique
class InteractionType(Enum):
MESSAGE_REACTION = auto()
MESSAGE_REPLY = auto()
MESSAGE_MENTION = auto()
class Interaction:
def __init__(self, user1, user2, recorded_at, type):
self.user1 = user1
self.user2 = user2
self.type = type
self.recorded_at = recorded_at
def weight(self, t_now, half_life):
return 0.5 ** ((t_now - self.recorded_at).total_seconds() / half_life)
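    # Worked example (values assumed, not from the original code): with
    # half_life=172800 s (two days), an interaction recorded exactly two days
    # before t_now keeps 0.5 ** 1 == 0.5 of its base weight, and one recorded
    # four days before t_now keeps 0.5 ** 2 == 0.25.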
class SocialInteractionGraph:
def __init__(self, name, weights, half_life=172800):
self.name = name
self.t_now = datetime.now()
self.weights = weights
self.half_life = half_life
self.graph = nx.Graph(
name=self.name,
half_life=self.half_life,
)
def _calc_edge_weight(self, interaction):
return self.weights[interaction.type.name] * interaction.weight(
self.t_now, self.half_life
)
def add_interaction(self, interaction):
user1, user2 = interaction.user1, interaction.user2
weight = self._calc_edge_weight(interaction)
if weight == 0:
return
self.graph.add_node(user1)
self.graph.add_node(user2)
if not self.graph.has_edge(user1, user2):
self.graph.add_edge(user1, user2, weight=0)
        self.graph[user1][user2]["weight"] += weight  # reuse the weight computed above
@property
def edges(self):
return self.graph.number_of_edges()
@property
def nodes(self):
return self.graph.number_of_nodes()
def save(self):
nx.write_gexf(self.graph, f"{self.name}.gexf")
| 24.985507
| 81
| 0.636311
|
2d1f4287b53d4d9575a419729927d56fc44c0f01
| 5,801
|
py
|
Python
|
MPU6050_cal.py
|
bunny1985/mpu6050_DMP_python
|
cb2ffaa55514ee63792ec9738e0c4be064b4c344
|
[
"MIT"
] | 6
|
2021-05-18T19:44:48.000Z
|
2022-03-03T06:47:01.000Z
|
MPU6050_cal.py
|
bunny1985/mpu6050_DMP_python
|
cb2ffaa55514ee63792ec9738e0c4be064b4c344
|
[
"MIT"
] | null | null | null |
MPU6050_cal.py
|
bunny1985/mpu6050_DMP_python
|
cb2ffaa55514ee63792ec9738e0c4be064b4c344
|
[
"MIT"
] | 1
|
2021-09-03T14:06:06.000Z
|
2021-09-03T14:06:06.000Z
|
from MPU6050RPI import MPU6050
from SimplePID import SimplePID
import struct
def to_short(somebytes):
return struct.unpack('>h', somebytes)[0]
def avg_from_array(a_array):
sum = 0.0
for index in range(0, len(a_array)):
if isinstance(a_array[index], (bytes, bytearray)):
a_array[index] = to_short(a_array[index])
sum += a_array[index]
return sum / len(a_array)
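# Worked example (illustrative values): avg_from_array([b'\x00\x10', 16, 32])
# unpacks the big-endian bytes to 16 and returns (16 + 16 + 32) / 3, about 21.33.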
i2c_bus = 1
device_address = 0x68
# The offsets are different for each device and should be changed
# accordingly using a calibration procedure
x_accel_offset = 0
y_accel_offset = 0
z_accel_offset = 0
x_gyro_offset = 0
y_gyro_offset = 0
z_gyro_offset = 0
enable_debug_output = True
mpu = MPU6050(i2c_bus, device_address, x_accel_offset, y_accel_offset,
z_accel_offset, x_gyro_offset, y_gyro_offset, z_gyro_offset,
enable_debug_output)
kp = 0.03125
ki = 0.25
kd = 0
pidax = SimplePID(0, -150, 15000, kp, ki, kd, 100, True)
piday = SimplePID(0, -150, 15000, kp, ki, kd, 100, True)
pidaz = SimplePID(0, -150, 15000, kp, ki, kd, 100, True)
pidgx = SimplePID(0, -150, 15000, kp, ki, kd, 100, True)
pidgy = SimplePID(0, -150, 15000, kp, ki, kd, 100, True)
pidgz = SimplePID(0, -15000, 15000, kp, ki, kd, 100, True)
accel_reading = mpu.get_acceleration()
x_accel_reading = accel_reading[0]
y_accel_reading = accel_reading[1]
z_accel_reading = accel_reading[2]
x_accel_avg = [0] * 100
y_accel_avg = [0] * 100
z_accel_avg = [0] * 100
x_accel_offset_avg = [0] * 100
y_accel_offset_avg = [0] * 100
z_accel_offset_avg = [0] * 100
axindex = 0
ayindex = 0
azindex = 0
gyro_reading = mpu.get_rotation()
x_gyro_reading = gyro_reading[0]
y_gyro_reading = gyro_reading[1]
z_gyro_reading = gyro_reading[2]
x_gyro_avg = [0] * 100
y_gyro_avg = [0] * 100
z_gyro_avg = [0] * 100
x_gyro_offset_avg = [0] * 100
y_gyro_offset_avg = [0] * 100
z_gyro_offset_avg = [0] * 100
gxindex = 0
gyindex = 0
gzindex = 0
try:
while True:
accel_reading = mpu.get_acceleration()
x_accel_reading = accel_reading[0]
y_accel_reading = accel_reading[1]
z_accel_reading = accel_reading[2]
gyro_reading = mpu.get_rotation()
x_gyro_reading = gyro_reading[0]
y_gyro_reading = gyro_reading[1]
z_gyro_reading = gyro_reading[2]
if pidax.check_time():
x_accel_offset = pidax.get_output_value(to_short(x_accel_reading))
mpu.set_x_accel_offset(int(x_accel_offset))
x_accel_avg[axindex] = x_accel_reading
x_accel_offset_avg[axindex] = x_accel_offset
axindex += 1
if axindex == len(x_accel_avg):
axindex = 0
print('x_avg_read: ' +
str(avg_from_array(x_accel_avg)) +
' x_avg_offset: ' +
str(avg_from_array(x_accel_offset_avg)))
print('y_avg_read: ' +
str(avg_from_array(y_accel_avg)) +
' y_avg_offset: ' +
str(avg_from_array(y_accel_offset_avg)))
print('z_avg_read: ' +
str(avg_from_array(z_accel_avg)) +
' z_avg_offset: ' +
str(avg_from_array(z_accel_offset_avg)))
if piday.check_time():
y_accel_offset = piday.get_output_value(to_short(y_accel_reading))
mpu.set_y_accel_offset(int(y_accel_offset))
y_accel_avg[ayindex] = y_accel_reading
y_accel_offset_avg[ayindex] = y_accel_offset
ayindex += 1
if ayindex == len(y_accel_avg):
ayindex = 0
if pidaz.check_time():
z_accel_offset = pidaz.get_output_value(to_short(z_accel_reading))
mpu.set_z_accel_offset(int(z_accel_offset))
z_accel_avg[azindex] = z_accel_reading
z_accel_offset_avg[azindex] = z_accel_offset
azindex += 1
if azindex == len(z_accel_avg):
azindex = 0
# Gyro calibration
if pidgx.check_time():
x_gyro_offset = pidgx.get_output_value(to_short(x_gyro_reading))
mpu.set_x_gyro_offset(int(x_gyro_offset))
x_gyro_avg[gxindex] = x_gyro_reading
x_gyro_offset_avg[gxindex] = x_gyro_offset
gxindex += 1
if gxindex == len(x_gyro_avg):
gxindex = 0
print('x_avg_read_gyro: ' +
str(avg_from_array(x_gyro_avg)) +
' x_avg_offset: ' +
str(avg_from_array(x_gyro_offset_avg)))
print('y_avg_read_gyro: ' +
str(avg_from_array(y_gyro_avg)) +
' y_avg_offset: ' +
str(avg_from_array(y_gyro_offset_avg)))
print('z_avg_read_gyro: ' +
str(avg_from_array(z_gyro_avg)) +
' z_avg_offset: ' +
str(avg_from_array(z_gyro_offset_avg)))
if pidgy.check_time():
y_gyro_offset = pidgy.get_output_value(to_short(y_gyro_reading))
mpu.set_y_gyro_offset(int(y_gyro_offset))
y_gyro_avg[gyindex] = y_gyro_reading
y_gyro_offset_avg[gyindex] = y_gyro_offset
gyindex += 1
if gyindex == len(y_gyro_avg):
gyindex = 0
if pidgz.check_time():
z_gyro_offset = pidgz.get_output_value(to_short(z_gyro_reading))
mpu.set_z_gyro_offset(int(z_gyro_offset))
z_gyro_avg[gzindex] = z_gyro_reading
z_gyro_offset_avg[gzindex] = z_gyro_offset
gzindex += 1
if gzindex == len(z_gyro_avg):
gzindex = 0
except KeyboardInterrupt:
pass
| 29.596939
| 78
| 0.604034
|
e18770a344360f6b0100c6876d2b35412cf5b112
| 4,432
|
py
|
Python
|
n_dist_keying/n_distance_voter.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 16
|
2018-04-20T11:11:18.000Z
|
2020-01-14T11:11:59.000Z
|
n_dist_keying/n_distance_voter.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 2
|
2019-09-16T06:22:02.000Z
|
2020-08-27T23:11:49.000Z
|
n_dist_keying/n_distance_voter.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 4
|
2019-07-18T18:01:05.000Z
|
2020-07-10T18:44:09.000Z
|
from n_dist_keying.distance_storage import DistanceStorage
from n_dist_keying.text_comparator import TextComparator
import numpy as np
class NDistanceVoter(object):
def __init__(self, texts):
self.d_storage = DistanceStorage()
self._texts = texts
def set_texts(self, new_texts):
self._texts = new_texts
def get_texts(self):
return self._texts
def reset(self):
self.d_storage = DistanceStorage()
self._texts = []
def compare_texts(self, take_longest_on_empty_lines=False, vote_without_spaces=False):
"""
Compares an array of texts and gives the n_distance vote
:param texts:
:return:
"""
texts_loc = self.get_texts()
if vote_without_spaces:
for text_index, text in enumerate(texts_loc):
texts_loc[text_index] = text.replace(" ","")
if take_longest_on_empty_lines is True:
texts = self.get_texts()
textlens = []
number_empty = 0
for text in texts:
if text is False or text is True:
text = "" # todo verify this correct j4t 20.02
textlens.append(len(text))
if text.strip(" ") == "":
number_empty += 1
            too_few_texts = (len(texts) - number_empty) <= 2
            if too_few_texts:
                # if there are too few strings to compare, just take the longest string as result
                selected_index = np.argmax(textlens)
                return selected_index
# do a text-wise comparison, which calculates a distance between all texts in this set
for text_index, text in enumerate(texts_loc):
self.compare_with_other_texts(text_index, text)
# calculate the distance from each item in set to all others
for text_index, text in enumerate(texts_loc):
self.d_storage.calculate_accumulated_distance(text_index)
# get the index of the item in set, which has the shortest distance to all others
self.d_storage.calculate_shortest_distance_index()
shortest_dist_index = self.d_storage.get_shortest_distance_index()
return shortest_dist_index
def compare_with_other_texts(self, text_index, text):
for text_index_cmp, text_cmp in enumerate(self.get_texts()):
# if line has the same index, continue
if text_index is text_index_cmp:
continue
existing_distance = self.d_storage.fetch_value(text_index, text_index_cmp)
# if line was already compared, continue
if existing_distance is not None:
continue
distance = self.get_distance(text, text_cmp)
self.d_storage.store_value(text_index, text_index_cmp, distance)
def get_distance(self, text1, text2):
# todo add more possibilities for distance measurement, i.e confidences, edit distance, context weighting
MODE_DIFFLIB = 'difflib' #best bet
MODE_NORMED_LEVENSHTEIN = 'normed_levenshtein' # longest alignment normed levenshtein distance
MODE_SORENSEN = 'sorensen'
MODE_JACCARD = 'jaccard'
MODE_HAMMING = 'hamming'
MODE_MYERS = 'myers' # use myers special difflib sequence matcher
mode = MODE_DIFFLIB # set your mode here
        # if both strings are undefined, treat them as identical (distance 0)
        if text1 is False and text2 is False or text1 is None and text2 is None:
            return 0
# One is false and one is not false
if (text1 is False or text2 is False) or (text1 is None or text2 is None):
return 1
dist = 1
if mode == MODE_DIFFLIB:
dist = TextComparator.compare_ocr_strings_difflib_seqmatch(text1, text2)
elif mode == MODE_NORMED_LEVENSHTEIN:
dist = TextComparator.compare_ocr_strings_levensthein_normed(text1, text2)
elif mode == MODE_HAMMING:
dist = TextComparator.compare_ocr_strings_hamming(text1, text2)
elif mode == MODE_SORENSEN:
dist = TextComparator.compare_ocr_strings_sorensen(text1, text2)
elif mode == MODE_JACCARD:
dist = TextComparator.compare_ocr_strings_jaccard(text1, text2)
elif mode == MODE_MYERS:
dist = TextComparator.compare_ocr_strings_myers(text1, text2)
return dist
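# A minimal usage sketch (hypothetical OCR hypotheses, not from the original module):
#
#   voter = NDistanceVoter(["hello world", "hello w0rld", "hxllo world"])
#   best_index = voter.compare_texts()
#   # best_index points at the hypothesis with the smallest accumulated
#   # distance to all other hypotheses under the selected distance mode.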
| 36.327869
| 113
| 0.644179
|
1faf03dc44027fd6373e1cca66669a053cb042d1
| 47,964
|
py
|
Python
|
research/object_detection/utils/config_util.py
|
sudharsan02358/project
|
6d309477eb445b76ddc0b832439d9cd41dd41a27
|
[
"Apache-2.0"
] | 2
|
2021-04-02T12:21:35.000Z
|
2021-12-14T07:29:38.000Z
|
research/object_detection/utils/config_util.py
|
sudharsan02358/project
|
6d309477eb445b76ddc0b832439d9cd41dd41a27
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/config_util.py
|
sudharsan02358/project
|
6d309477eb445b76ddc0b832439d9cd41dd41a27
|
[
"Apache-2.0"
] | 1
|
2021-12-04T19:35:04.000Z
|
2021-12-04T19:35:04.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
import tensorflow.compat.v1 as tf
from tensorflow.python.lib.io import file_io
from object_detection.protos import eval_pb2
from object_detection.protos import graph_rewriter_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "image_resizer"):
return getattr(meta_architecture_config, "image_resizer")
else:
    raise ValueError("{} has no image_resizer_config".format(
meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
    A list of two integers of the form [height, width]. `height` and `width` are
    set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
if image_resizer_config.HasField(
"identity_resizer") or image_resizer_config.HasField(
"conditional_shape_resizer"):
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_max_num_context_features(model_config):
"""Returns maximum number of context features from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the max number of context features if the model
config contains context_config, None otherwise
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.max_num_context_features
def get_context_feature_length(model_config):
"""Returns context feature length from a given config.
Args:
model_config: A model config file.
Returns:
An integer specifying the fixed length of each feature in context_features.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "context_config"):
return meta_architecture_config.context_config.context_feature_length
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Value are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
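# For example (assumed path): configs = get_configs_from_pipeline_file(
#     "samples/configs/pipeline.config") returns a dict whose entries such as
# configs["model"], configs["train_config"] and configs["eval_input_configs"]
# can be modified before rebuilding the proto with
# create_pipeline_proto_from_configs(configs).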
def clear_fine_tune_checkpoint(pipeline_config_path,
new_pipeline_config_path):
"""Clears fine_tune_checkpoint and writes a new pipeline config file."""
configs = get_configs_from_pipeline_file(pipeline_config_path)
configs["train_config"].fine_tune_checkpoint = ""
configs["train_config"].load_all_detection_checkpoint_vars = False
pipeline_proto = create_pipeline_proto_from_configs(configs)
with tf.gfile.Open(new_pipeline_config_path, "wb") as f:
f.write(text_format.MessageToString(pipeline_proto))
def update_fine_tune_checkpoint_type(train_config):
"""Set `fine_tune_checkpoint_type` using `from_detection_checkpoint`.
`train_config.from_detection_checkpoint` field is deprecated. For backward
compatibility, this function sets `train_config.fine_tune_checkpoint_type`
based on `train_config.from_detection_checkpoint`.
Args:
train_config: train_pb2.TrainConfig proto object.
"""
if not train_config.fine_tune_checkpoint_type:
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = "detection"
else:
train_config.fine_tune_checkpoint_type = "classification"
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Value are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
meta_architecture_config = getattr(model_config, meta_architecture)
if hasattr(meta_architecture_config, "num_classes"):
return meta_architecture_config.num_classes
else:
raise ValueError("{} does not have num_classes.".format(meta_architecture))
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
def check_and_parse_input_config_key(configs, key):
"""Checks key and returns specific fields if key is valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
    is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case key can be one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
2. Generic key/value, which update a specific parameter based on namespaced
configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
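# Illustrative usage sketch (the file name, learning rate value and the eval
# input config name "eval_coco" below are hypothetical):
#   configs = get_configs_from_pipeline_file("pipeline.config")
#   configs = merge_external_params_with_configs(
#       configs,
#       kwargs_dict={
#           "learning_rate": 0.004,
#           "eval_input_configs:eval_coco:input_path": "coco_val.record",
#       })
# The first key is a strategy-based override; the second is a specific update
# that targets the input_path field of the eval input config named "eval_coco".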
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
elif field_name == "retain_original_image_additional_channels_in_eval":
_update_retain_original_image_additional_channels(configs["eval_config"],
value)
elif field_name == "num_classes":
_update_num_classes(configs["model"], value)
elif field_name == "sample_from_datasets_weights":
_update_sample_from_datasets_weights(configs["train_input_config"], value)
elif field_name == "peak_max_pool_kernel_size":
_update_peak_max_pool_kernel_size(configs["model"], value)
elif field_name == "candidate_search_scale":
_update_candidate_search_scale(configs["model"], value)
elif field_name == "candidate_ranking_mode":
_update_candidate_ranking_mode(configs["model"], value)
elif field_name == "score_distance_offset":
_update_score_distance_offset(configs["model"], value)
elif field_name == "box_scale":
_update_box_scale(configs["model"], value)
elif field_name == "keypoint_candidate_score_threshold":
_update_keypoint_candidate_score_threshold(configs["model"], value)
elif field_name == "rescore_instances":
_update_rescore_instances(configs["model"], value)
else:
return False
return True
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
input_config: A input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
input_name: String name used to identify input config to update with. Should
be either None or value of the 'name' field in one of the input reader
configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
ValueError: when input_name is None and number of eval_input_readers does
not equal to 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
all other defined learning rates in the pipeline config are scaled to maintain
their same ratio with the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
batch_size: Batch size to use for training (Ideally a power of 2). Inputs
are rounded, and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
e.g. "model.ssd.train_config.batch_size"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
ValueError if the message key does not match the existing proto fields.
TypeError the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
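# Illustrative usage sketch (assumes an SSD pipeline where num_classes is
# explicitly set, since this helper checks field presence with HasField):
#   _update_generic(configs, "model.ssd.num_classes", 37)
# walks configs["model"] into its ssd sub-message and sets num_classes on it.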
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
Momentum is only supported for RMSPropOptimizer and MomentumOptimizer. For any
other optimizer, no changes take place. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
Detection models usually define a loss weight for both classification and
objectness. This function updates the weights such that the ratio between
classification weight to localization weight is the ratio provided.
Arbitrarily, localization weight is set to 1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
def _update_retain_original_image_additional_channels(
eval_config,
retain_original_image_additional_channels):
"""Updates eval config to retain original image additional channels or not.
The eval_config object is updated in place, and hence not returned.
Args:
    eval_config: An eval_pb2.EvalConfig.
retain_original_image_additional_channels: Boolean indicating whether to
retain original image additional channels in eval mode.
"""
eval_config.retain_original_image_additional_channels = (
retain_original_image_additional_channels)
def remove_unecessary_ema(variables_to_restore, no_ema_collection=None):
"""Remap and Remove EMA variable that are not created during training.
ExponentialMovingAverage.variables_to_restore() returns a map of EMA names
to tf variables to restore. E.g.:
{
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
}
This function takes care of the extra ExponentialMovingAverage variables
that get created during eval but aren't available in the checkpoint, by
remapping the key to the shallow copy of the variable itself, and remove
the entry of its EMA from the variables to restore. An example resulting
dictionary would look like:
{
conv/batchnorm/gamma: conv/batchnorm/gamma,
conv_4/conv2d_params: conv_4/conv2d_params,
global_step: global_step
}
Args:
variables_to_restore: A dictionary created by ExponentialMovingAverage.
variables_to_restore().
no_ema_collection: A list of namescope substrings to match the variables
to eliminate EMA.
Returns:
A variables_to_restore dictionary excluding the collection of unwanted
EMA mapping.
"""
if no_ema_collection is None:
return variables_to_restore
  # Iterate over a snapshot of the keys so entries can be deleted safely while
  # looping (mutating a dict during iteration raises a RuntimeError).
  for key in list(variables_to_restore):
    if "ExponentialMovingAverage" in key:
      for name in no_ema_collection:
        if name in key:
          variables_to_restore[key.replace("/ExponentialMovingAverage",
                                           "")] = variables_to_restore[key]
          del variables_to_restore[key]
          break
  return variables_to_restore
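# Illustrative usage sketch (the scope name "FeatureExtractor" is hypothetical):
#   variables_to_restore = ema.variables_to_restore()
#   variables_to_restore = remove_unecessary_ema(
#       variables_to_restore, no_ema_collection=["FeatureExtractor"])
# Any ".../ExponentialMovingAverage" key containing "FeatureExtractor" is
# remapped to its plain variable name and its EMA entry is dropped.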
def _update_num_classes(model_config, num_classes):
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model_config.faster_rcnn.num_classes = num_classes
if meta_architecture == "ssd":
model_config.ssd.num_classes = num_classes
def _update_sample_from_datasets_weights(input_reader_config, weights):
"""Updated sample_from_datasets_weights with overrides."""
if len(weights) != len(input_reader_config.sample_from_datasets_weights):
raise ValueError(
"sample_from_datasets_weights override has a different number of values"
" ({}) than the configured dataset weights ({})."
.format(
len(input_reader_config.sample_from_datasets_weights),
len(weights)))
del input_reader_config.sample_from_datasets_weights[:]
input_reader_config.sample_from_datasets_weights.extend(weights)
def _update_peak_max_pool_kernel_size(model_config, kernel_size):
"""Updates the max pool kernel size (NMS) for keypoints in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.peak_max_pool_kernel_size = kernel_size
else:
tf.logging.warning("Ignoring config override key for "
"peak_max_pool_kernel_size since there are multiple "
"keypoint estimation tasks")
def _update_candidate_search_scale(model_config, search_scale):
"""Updates the keypoint candidate search scale in CenterNet."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_search_scale = search_scale
else:
tf.logging.warning("Ignoring config override key for "
"candidate_search_scale since there are multiple "
"keypoint estimation tasks")
def _update_candidate_ranking_mode(model_config, mode):
"""Updates how keypoints are snapped to candidates in CenterNet."""
if mode not in ("min_distance", "score_distance_ratio"):
raise ValueError("Attempting to set the keypoint candidate ranking mode "
"to {}, but the only options are 'min_distance' and "
"'score_distance_ratio'.".format(mode))
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.candidate_ranking_mode = mode
else:
tf.logging.warning("Ignoring config override key for "
"candidate_ranking_mode since there are multiple "
"keypoint estimation tasks")
def _update_score_distance_offset(model_config, offset):
"""Updates the keypoint candidate selection metric. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.score_distance_offset = offset
else:
tf.logging.warning("Ignoring config override key for "
"score_distance_offset since there are multiple "
"keypoint estimation tasks")
def _update_box_scale(model_config, box_scale):
"""Updates the keypoint candidate search region. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.box_scale = box_scale
else:
tf.logging.warning("Ignoring config override key for box_scale since "
"there are multiple keypoint estimation tasks")
def _update_keypoint_candidate_score_threshold(model_config, threshold):
"""Updates the keypoint candidate score threshold. See CenterNet proto."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.keypoint_candidate_score_threshold = threshold
else:
tf.logging.warning("Ignoring config override key for "
"keypoint_candidate_score_threshold since there are "
"multiple keypoint estimation tasks")
def _update_rescore_instances(model_config, should_rescore):
"""Updates whether boxes should be rescored based on keypoint confidences."""
if isinstance(should_rescore, str):
    should_rescore = should_rescore == "True"
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "center_net":
if len(model_config.center_net.keypoint_estimation_task) == 1:
kpt_estimation_task = model_config.center_net.keypoint_estimation_task[0]
kpt_estimation_task.rescore_instances = should_rescore
else:
tf.logging.warning("Ignoring config override key for "
"rescore_instances since there are multiple keypoint "
"estimation tasks")
| 39.903494
| 80
| 0.748541
|
355543aa3fb8cac417d062551c3a3dc9b6182c76
| 3,151
|
py
|
Python
|
students/K33402/Beresnev_Andrey/lab2/Hotels/Hotels/settings.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 4
|
2020-09-03T15:41:42.000Z
|
2021-12-24T15:28:20.000Z
|
students/K33402/Beresnev_Andrey/lab2/Hotels/Hotels/settings.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 48
|
2020-09-13T20:22:42.000Z
|
2021-04-30T11:13:30.000Z
|
students/K33402/Beresnev_Andrey/lab2/Hotels/Hotels/settings.py
|
agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021
|
7d5eab0d68af378083f21473cbbd5e5def6aa60a
|
[
"MIT"
] | 69
|
2020-09-06T10:32:37.000Z
|
2021-11-28T18:13:17.000Z
|
"""
Django settings for Hotels project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l%7#mxzms=z5doaru!1#n9zf(siqa+i52skx0cl_z7w!lwnop1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'MyApp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Hotels.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Hotels.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'MyApp.Client'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.617886
| 91
| 0.698826
|
8990b10749eb08d19bc3e8dc990e34611495b5e1
| 249
|
py
|
Python
|
atcoder/abc062/a.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 506
|
2018-08-22T10:30:38.000Z
|
2022-03-31T10:01:49.000Z
|
atcoder/abc062/a.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 13
|
2019-08-07T18:31:18.000Z
|
2020-12-15T21:54:41.000Z
|
atcoder/abc062/a.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 234
|
2018-08-06T17:11:41.000Z
|
2022-03-26T10:56:42.000Z
|
#!/usr/bin/env python3
# https://abc062.contest.atcoder.jp/tasks/abc062_a
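# Months are grouped by length into {1,3,5,7,8,10,12}, {4,6,9,11} (the list s
# below) and {2}; the answer is Yes exactly when x and y fall in the same
# group. The "x == 2 or y == 2" branch handles February, which is in a group
# of its own.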
x, y = map(int, input().split())
s = [4, 6, 9, 11]
if x in s and y in s: print('Yes')
elif x in s or y in s: print('No')
elif x == 2 or y == 2: print('No')
else: print('Yes')
| 27.666667
| 50
| 0.606426
|
e72e7a23cff9901315046bfb52f2e631f6ffff5e
| 1,964
|
py
|
Python
|
knover/tasks/__init__.py
|
Vonderland/Knover
|
73815ed6f91a301674001a8d29f3a0356e627ea6
|
[
"Apache-2.0"
] | 1
|
2020-07-06T07:21:01.000Z
|
2020-07-06T07:21:01.000Z
|
knover/tasks/__init__.py
|
Vonderland/Knover
|
73815ed6f91a301674001a8d29f3a0356e627ea6
|
[
"Apache-2.0"
] | 1
|
2020-07-04T04:58:47.000Z
|
2020-07-04T04:58:47.000Z
|
knover/tasks/__init__.py
|
Vonderland/Knover
|
73815ed6f91a301674001a8d29f3a0356e627ea6
|
[
"Apache-2.0"
] | 1
|
2020-07-04T04:38:19.000Z
|
2020-07-04T04:38:19.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define task."""
from knover.core.task import Task
from knover.utils import parse_args
TASK_REGISTRY = {}
__all__ = [
"TASK_REGISTRY",
"register_task",
"create_task",
"add_cmdline_args"
]
def register_task(name):
"""Register a new task class."""
def __wrapped__(cls):
if name in TASK_REGISTRY:
raise ValueError(f"Cannot register duplicate task ({name})")
if not issubclass(cls, Task):
raise ValueError(f"Task ({name}: {cls.__name__}) must extend Task")
TASK_REGISTRY[name] = cls
return cls
return __wrapped__
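# Illustrative usage sketch (MyTask is hypothetical):
#
#   @register_task("my_task")
#   class MyTask(Task):
#       ...
#
# After registration, create_task(args) with args.task == "my_task" returns an
# instance of MyTask.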
def create_task(args) -> Task:
"""Create a task."""
return TASK_REGISTRY[args.task](args)
def add_cmdline_args(parser):
"""Add cmdline argument of Task."""
group = parser.add_argument_group("Task")
group.add_argument("--task", type=str, required=True,
help="The task type.")
args = parse_args(parser, allow_unknown=True)
if args.task not in TASK_REGISTRY:
raise ValueError(f"Unknown task type: {args.task}")
TASK_REGISTRY[args.task].add_cmdline_args(parser)
return group
import knover.tasks.classification
import knover.tasks.dense_embedding
import knover.tasks.dialog_generation
import knover.tasks.knowledge_augmented_generation
import knover.tasks.next_sentence_prediction
| 29.757576
| 79
| 0.710285
|
b91eb028b394a5b6b3cadc9b8045844afe8c7082
| 1,397
|
py
|
Python
|
kiez/io/temp_file_handling.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | null | null | null |
kiez/io/temp_file_handling.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | null | null | null |
kiez/io/temp_file_handling.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# Author: Roman Feldbauer
import logging
from tempfile import NamedTemporaryFile, mkstemp
__all__ = ["create_tempfile_preferably_in_dir"]
def create_tempfile_preferably_in_dir(
suffix: str = None,
prefix: str = None,
directory: str = None,
persistent: bool = False,
):
"""Create a temporary file with precedence for directory if possible, in TMP otherwise.
For example, this is useful to try to save into /dev/shm.
Parameters
    ----------
suffix: str
suffix of tempfile
prefix: str
prefix of tempfile
directory: str
directory where tempfile should preferably be created
persistent: bool
If True create a persistent file
Returns
-------
path
string path of tempfile
"""
temp_file = mkstemp if persistent else NamedTemporaryFile
try:
handle = temp_file(suffix=suffix, prefix=prefix, dir=directory)
warn = False
except FileNotFoundError:
handle = temp_file(suffix=suffix, prefix=prefix, dir=None)
warn = True
# Extract the path (as string)
try:
path = handle.name
except AttributeError:
_, path = handle
if warn:
logging.warning(
f"Could not create temp file in {directory}. Instead, the path is {path}."
)
return path
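# Illustrative usage sketch (/dev/shm is just one candidate directory):
#   path = create_tempfile_preferably_in_dir(suffix=".bin", directory="/dev/shm")
# If the preferred directory does not exist, the file is created in the default
# temporary directory instead and a warning with the actual path is logged.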
| 25.4
| 91
| 0.645669
|
5ff42d896dc54ed117f9d976667e723816959ee4
| 3,354
|
py
|
Python
|
pychron/extraction_line/stop_watch.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/extraction_line/stop_watch.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/extraction_line/stop_watch.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from pyface.timer.timer import Timer
from traits.api import HasTraits, Button, Int, Bool, Property
from traitsui.api import (
Handler,
View,
Item,
UItem,
VGroup,
HGroup,
spring,
Spring,
ButtonEditor,
)
from pychron.core.ui.lcd_editor import LCDEditor
try:
from AppKit import NSSpeechSynthesizer
SPEECHSYNTH = NSSpeechSynthesizer.alloc().initWithVoice_(
"com.apple.speech.synthesis.voice.Vicki"
)
SPEECHSYNTH.setRate_(275)
except ImportError:
SPEECHSYNTH = None
class StopWatchHandler(Handler):
def closed(self, info, isok):
info.object.destroy()
class StopWatch(HasTraits):
start_stop_button = Button
reset_button = Button("Reset")
current_time = Int
call_interval = Int(5)
_alive = Bool
start_stop_label = Property(depends_on="_alive")
_base_time = 0
_timer = None
def destroy(self):
if self._timer:
self._timer.Stop()
def _iter(self):
elapsed = int(round(time.time() - self._start_time))
self.current_time = self._base_time + elapsed
if self.call_interval and not self.current_time % self.call_interval:
if SPEECHSYNTH:
SPEECHSYNTH.startSpeakingString_(str(self.current_time))
def _reset_button_fired(self):
self.current_time = 0
self._base_time = 0
def _start_stop_button_fired(self):
if self._alive:
self._timer.Stop()
self._base_time = self.current_time
else:
self._start_time = time.time()
t = Timer(1000, self._iter)
self._timer = t
self._alive = not self._alive
def traits_view(self):
v = View(
VGroup(
UItem("current_time", editor=LCDEditor()),
HGroup(
UItem(
"start_stop_button",
editor=ButtonEditor(label_value="start_stop_label"),
),
UItem("reset_button", enabled_when="not _alive"),
spring,
Item("call_interval"),
),
),
handler=StopWatchHandler,
title="StopWatch",
)
return v
def _get_start_stop_label(self):
return "Stop" if self._alive else "Start"
if __name__ == "__main__":
s = StopWatch()
s.configure_traits()
# ============= EOF =============================================
| 27.95
| 81
| 0.576923
|
7513c6f14503a52e177f87a88b8df1b56efd057c
| 7,067
|
py
|
Python
|
src/pyspd/loader.py
|
michalbachowski/pyspd
|
fe5bdb37d9b46ef11ac5739ba768d9455286abca
|
[
"MIT"
] | null | null | null |
src/pyspd/loader.py
|
michalbachowski/pyspd
|
fe5bdb37d9b46ef11ac5739ba768d9455286abca
|
[
"MIT"
] | null | null | null |
src/pyspd/loader.py
|
michalbachowski/pyspd
|
fe5bdb37d9b46ef11ac5739ba768d9455286abca
|
[
"MIT"
] | null | null | null |
import importlib
from logging import getLogger
import os
import sys
logger = getLogger(__name__)
class LoaderInterface(object):
"""Interface for any loader class"""
def load(self, names):
"""
Locates and loads plugin modules using built in strategy
Arguments:
:param names: names (paths, module names) to be loaded
:type names: Iterable[str]
"""
raise NotImplementedError()
class LoaderAggregate(LoaderInterface):
"""Plugin loader that aggregates other plugin loaders"""
def __init__(self, loaders):
"""Object initialization
Arguments:
:param loaders: loaders to be aggregated
:type loaders: Iterable[LoaderInterface]
"""
self._loaders = list(loaders)
def load(self, names):
"""
Loads plugins using given loaders
Arguments:
:param names: names (paths, module names) to be loaded
:type names: Iterable[str]
"""
for loader in self._loaders:
loader.load(names)
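# Illustrative usage sketch (the plugin directory is hypothetical):
#   loader = LoaderAggregate([LoaderDir()])
#   loader.load(["/opt/app/plugins"])
# Every aggregated loader receives the same iterable of names, so the loaders
# should all expect the same kind of name (file system paths vs. module names).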
class LoaderFile(LoaderInterface):
"""Loads plugins in given file or directory.
Loads module from given file or directory"""
def __init__(self):
"""
Object initialization
"""
self._module_name_cache = []
def load(self, names):
"""
Loads a set of plugins at the given path.
Arguments:
:param names: file paths to load plugin from
:type names: Iterable[str]
"""
for path in names:
filename = os.path.basename(path)
dirname = os.path.dirname(path)
self._load_module_from_file(dirname, filename)
def _load_module_from_file(self, path, filename):
"""Loads module from given file and path
Arguments:
:param path: path where file is placed
:type path: string
:param filename: name of file to load
:type filename: string
"""
logger.debug("loading module from file; path=[%s]; filename=[%s]",
path, filename)
(path, name) = self._prepare_module_name(path, filename)
if not self._valid_module_information(path, name):
return
self._import_module(path, name)
def _prepare_module_name(self, plugin_dir, item):
"""
Prepares name of module to load
Arguments:
:param plugin_dir: directory where module is placed
:type plugin_dir: string
:param item: name of module file (might be file of directory)
:type item: string
:returns: string -- name of module to load
"""
if item == '__pycache__':
return (None, None)
if item.startswith('__init__.py'):
return (os.path.dirname(plugin_dir), os.path.basename(plugin_dir))
if item.endswith(".py"):
return (plugin_dir, item[:-3])
if item.endswith(".pyc"):
return (None, None)
if os.path.isdir(os.path.join(plugin_dir, item)):
return (plugin_dir, item)
return (None, None)
def _valid_module_information(self, path, name):
"""Checks if given module name is valid
Arguments:
:param name: module name
:type name: string
:returns: bool -- information whether module name is valid
"""
if name is None:
return False
tmpname = os.path.join(path, name)
if tmpname in self._module_name_cache:
return False
self._module_name_cache.append(tmpname)
return True
def _import_module(self, path, name):
"""
Imports module given as path to directory and module name
Arguments:
:param path: path to import module from
:type path: string
:param name: name of module
:type name: string
:returns: object -- loaded module
"""
if path not in sys.path:
sys.path.insert(0, path)
logger.debug('loading module; module=%s; sys.path=%s', name,
' '.join(sys.path))
importlib.import_module(name)
class LoaderDir(LoaderFile):
"""Loads plugins in given directory.
    By default any module (file or directory) inside this directory
    will be treated as a potential plugin."""
def load(self, names):
"""
Loads a set of plugins at the given path.
Arguments:
:param names: path to directories to load plugins from
:type names: Iterable[str]
"""
for path in names:
logger.debug("Loading directory; dir=[%s]", path)
self._find_plugins_in_path(path)
def _find_plugins_in_path(self, path):
"""Looks for plugins in given path
Arguments:
:param path: path to search for plugins in
:type path: string
"""
plugin_dir = os.path.realpath(path)
for filename in os.listdir(plugin_dir):
self._load_module_from_file(plugin_dir, filename)
class LoaderRecursiveDir(LoaderFile):
"""Loads plugins in given directory recursively.
    By default any file inside this directory
    will be treated as a potential plugin."""
def load(self, names):
"""
Loads a set of plugins at the given path.
Arguments:
:param names: path to directories to load plugins from
:type names: Iterable[str]
"""
logger.debug('loading plugins from paths: %s', ' '.join(names))
for path in names:
self._find_plugins_in_path(path)
def _find_plugins_in_path(self, path):
"""Looks for plugins in given path
Arguments:
:param path: path to search for plugins in
:type path: string
"""
plugin_dir = os.path.realpath(path)
for filename in os.listdir(plugin_dir):
self._handle_path(plugin_dir, filename)
def _handle_path(self, plugin_dir, filename):
tmp_dir = os.path.join(plugin_dir, filename)
if os.path.isdir(tmp_dir):
logger.debug('filename is dir, searching recursively; name=%s',
filename)
self._find_plugins_in_path(tmp_dir)
return
logger.debug('filename is file, loading; name=%s', filename)
self._load_module_from_file(plugin_dir, filename)
class LoaderModule(LoaderInterface):
"""Loads plugins given as pure python module name (foo.bar.baz).
Loader expects that PYTHONPATH is set correctly
"""
def load(self, names):
"""
Loads plugins from given modules
Arguments:
:param names: names of modules to be loaded
:type names: Iterable[str]
"""
for module in names:
importlib.import_module(module)
| 30.330472
| 78
| 0.582991
|
25e726f2c31d9843eed6bb79fd87e4c01d3760ef
| 40,525
|
py
|
Python
|
src/_pytest/assertion/rewrite.py
|
rosemichaele/pytest
|
1c0ab3c2a32f7932378a1c37106d082784cb4700
|
[
"MIT"
] | 2
|
2020-08-30T13:12:52.000Z
|
2020-09-03T05:38:28.000Z
|
src/_pytest/assertion/rewrite.py
|
rosemichaele/pytest
|
1c0ab3c2a32f7932378a1c37106d082784cb4700
|
[
"MIT"
] | 9
|
2020-08-11T15:19:55.000Z
|
2022-03-12T00:11:12.000Z
|
src/_pytest/assertion/rewrite.py
|
rosemichaele/pytest
|
1c0ab3c2a32f7932378a1c37106d082784cb4700
|
[
"MIT"
] | 3
|
2020-12-03T13:39:33.000Z
|
2021-04-13T02:58:04.000Z
|
"""Rewrite assertion AST to produce nice error messages"""
import ast
import errno
import functools
import importlib.abc
import importlib.machinery
import importlib.util
import io
import itertools
import marshal
import os
import struct
import sys
import tokenize
import types
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from _pytest._io.saferepr import saferepr
from _pytest._version import version
from _pytest.assertion import util
from _pytest.assertion.util import ( # noqa: F401
format_explanation as _format_explanation,
)
from _pytest.compat import fspath
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import Path
from _pytest.pathlib import PurePath
# pytest caches rewritten pycs in pycache dirs
PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
class AssertionRewritingHook(importlib.abc.MetaPathFinder):
"""PEP302/PEP451 import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
try:
self.fnpats = config.getini("python_files")
except ValueError:
self.fnpats = ["test_*.py", "*_test.py"]
self.session = None
self._rewritten_names = set() # type: Set[str]
self._must_rewrite = set() # type: Set[str]
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
# which might result in infinite recursion (#3506)
self._writing_pyc = False
self._basenames_to_check_rewrite = {"conftest"}
self._marked_for_rewrite_cache = {} # type: Dict[str, bool]
self._session_paths_checked = False
def set_session(self, session):
self.session = session
self._session_paths_checked = False
# Indirection so we can mock calls to find_spec originated from the hook during testing
_find_spec = importlib.machinery.PathFinder.find_spec
def find_spec(self, name, path=None, target=None):
if self._writing_pyc:
return None
state = self.config._assertstate
if self._early_rewrite_bailout(name, state):
return None
state.trace("find_module called for: %s" % name)
spec = self._find_spec(name, path)
if (
# the import machinery could not find a file to import
spec is None
# this is a namespace package (without `__init__.py`)
# there's nothing to rewrite there
# python3.5 - python3.6: `namespace`
# python3.7+: `None`
or spec.origin == "namespace"
or spec.origin is None
# we can only rewrite source files
or not isinstance(spec.loader, importlib.machinery.SourceFileLoader)
# if the file doesn't exist, we can't rewrite it
or not os.path.exists(spec.origin)
):
return None
else:
fn = spec.origin
if not self._should_rewrite(name, fn, state):
return None
return importlib.util.spec_from_file_location(
name,
fn,
loader=self,
submodule_search_locations=spec.submodule_search_locations,
)
def create_module(self, spec):
return None # default behaviour is fine
def exec_module(self, module):
fn = Path(module.__spec__.origin)
state = self.config._assertstate
self._rewritten_names.add(module.__name__)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = get_cache_dir(fn)
if write:
ok = try_makedirs(cache_dir)
if not ok:
write = False
state.trace("read only directory: {}".format(cache_dir))
cache_name = fn.name[:-3] + PYC_TAIL
pyc = cache_dir / cache_name
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn, pyc, state.trace)
if co is None:
state.trace("rewriting {!r}".format(fn))
source_stat, co = _rewrite_test(fn, self.config)
if write:
self._writing_pyc = True
try:
_write_pyc(state, co, source_stat, pyc)
finally:
self._writing_pyc = False
else:
state.trace("found cached rewritten pyc for {}".format(fn))
exec(co, module.__dict__)
def _early_rewrite_bailout(self, name, state):
"""This is a fast way to get out of rewriting modules. Profiling has
shown that the call to PathFinder.find_spec (inside of the find_spec
from this class) is a major slowdown, so, this method tries to
filter what we're sure won't be rewritten before getting to it.
"""
if self.session is not None and not self._session_paths_checked:
self._session_paths_checked = True
for path in self.session._initialpaths:
# Make something as c:/projects/my_project/path.py ->
# ['c:', 'projects', 'my_project', 'path.py']
parts = str(path).split(os.path.sep)
# add 'path' to basenames to be checked.
self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0])
# Note: conftest already by default in _basenames_to_check_rewrite.
parts = name.split(".")
if parts[-1] in self._basenames_to_check_rewrite:
return False
# For matching the name it must be as if it was a filename.
path = PurePath(os.path.sep.join(parts) + ".py")
for pat in self.fnpats:
# if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
# on the name alone because we need to match against the full path
if os.path.dirname(pat):
return False
if fnmatch_ex(pat, path):
return False
if self._is_marked_for_rewrite(name, state):
return False
state.trace("early skip of rewriting module: {}".format(name))
return True
def _should_rewrite(self, name, fn, state):
# always rewrite conftest files
if os.path.basename(fn) == "conftest.py":
state.trace("rewriting conftest file: {!r}".format(fn))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace(
"matched test file (was specified on cmdline): {!r}".format(fn)
)
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
fn_path = PurePath(fn)
for pat in self.fnpats:
if fnmatch_ex(pat, fn_path):
state.trace("matched test file {!r}".format(fn))
return True
return self._is_marked_for_rewrite(name, state)
def _is_marked_for_rewrite(self, name: str, state):
try:
return self._marked_for_rewrite_cache[name]
except KeyError:
for marked in self._must_rewrite:
if name == marked or name.startswith(marked + "."):
state.trace(
"matched marked file {!r} (from {!r})".format(name, marked)
)
self._marked_for_rewrite_cache[name] = True
return True
self._marked_for_rewrite_cache[name] = False
return False
def mark_rewrite(self, *names: str) -> None:
"""Mark import names as needing to be rewritten.
The named module or package as well as any nested modules will
be rewritten on import.
"""
already_imported = (
set(names).intersection(sys.modules).difference(self._rewritten_names)
)
for name in already_imported:
mod = sys.modules[name]
if not AssertionRewriter.is_rewrite_disabled(
mod.__doc__ or ""
) and not isinstance(mod.__loader__, type(self)):
self._warn_already_imported(name)
self._must_rewrite.update(names)
self._marked_for_rewrite_cache.clear()
def _warn_already_imported(self, name):
from _pytest.warning_types import PytestAssertRewriteWarning
from _pytest.warnings import _issue_warning_captured
_issue_warning_captured(
PytestAssertRewriteWarning(
"Module already imported so cannot be rewritten: %s" % name
),
self.config.hook,
stacklevel=5,
)
def get_data(self, pathname):
"""Optional PEP302 get_data API."""
with open(pathname, "rb") as f:
return f.read()
def _write_pyc_fp(fp, source_stat, co):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason to deviate.
fp.write(importlib.util.MAGIC_NUMBER)
# as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
size = source_stat.st_size & 0xFFFFFFFF
# "<LL" stands for 2 unsigned longs, little-ending
fp.write(struct.pack("<LL", mtime, size))
fp.write(marshal.dumps(co))
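# Note on the header written above: the cached pyc starts with the 4-byte
# importlib MAGIC_NUMBER, followed by a packed "<LL" pair holding the source
# mtime and size (8 bytes), for 12 header bytes in total, then the marshalled
# code object; _read_pyc() below validates exactly those 12 bytes.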
if sys.platform == "win32":
from atomicwrites import atomic_write
def _write_pyc(state, co, source_stat, pyc):
try:
with atomic_write(fspath(pyc), mode="wb", overwrite=True) as fp:
_write_pyc_fp(fp, source_stat, co)
except EnvironmentError as e:
state.trace("error writing pyc file at {}: errno={}".format(pyc, e.errno))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, pycache dir being a
# file etc.
return False
return True
else:
def _write_pyc(state, co, source_stat, pyc):
proc_pyc = "{}.{}".format(pyc, os.getpid())
try:
fp = open(proc_pyc, "wb")
except EnvironmentError as e:
state.trace(
"error writing pyc file at {}: errno={}".format(proc_pyc, e.errno)
)
return False
try:
_write_pyc_fp(fp, source_stat, co)
os.rename(proc_pyc, fspath(pyc))
except BaseException as e:
state.trace("error writing pyc file at {}: errno={}".format(pyc, e.errno))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, pycache dir being a
# file etc.
return False
finally:
fp.close()
return True
def _rewrite_test(fn, config):
"""read and rewrite *fn* and return the code object."""
fn = fspath(fn)
stat = os.stat(fn)
with open(fn, "rb") as f:
source = f.read()
tree = ast.parse(source, filename=fn)
rewrite_asserts(tree, source, fn, config)
co = compile(tree, fn, "exec", dont_inherit=True)
return stat, co
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(fspath(pyc), "rb")
except IOError:
return None
with fp:
try:
stat_result = os.stat(fspath(source))
mtime = int(stat_result.st_mtime)
size = stat_result.st_size
data = fp.read(12)
except EnvironmentError as e:
trace("_read_pyc({}): EnvironmentError {}".format(source, e))
return None
# Check for invalid or out of date pyc file.
if (
len(data) != 12
or data[:4] != importlib.util.MAGIC_NUMBER
or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF)
):
trace("_read_pyc(%s): invalid or out of date pyc" % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace("_read_pyc({}): marshal.load error {}".format(source, e))
return None
if not isinstance(co, types.CodeType):
trace("_read_pyc(%s): not a code object" % source)
return None
return co
def rewrite_asserts(mod, source, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config, source).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
return saferepr(obj).replace("\n", "\\n")
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
replaces = [("\n", "\n~"), ("%", "%%")]
if not isinstance(obj, str):
obj = saferepr(obj)
replaces.append(("\\n", "\n~"))
for r1, r2 in replaces:
obj = obj.replace(r1, r2)
return obj
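# A rough sketch of the behaviour above:
#   _format_assertmsg("first\nsecond")  ->  "first\n~second"
#   _format_assertmsg({"a": 1})         ->  "{'a': 1}" (via saferepr), with any
#   newlines produced by the repr likewise turned into "\n~".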
def _should_repr_global_name(obj):
if callable(obj):
return False
try:
return not hasattr(obj, "__name__")
except Exception:
return True
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if isinstance(explanation, str):
return explanation.replace("%", "%%")
else:
return explanation.replace(b"%", b"%%")
def _call_reprcompare(ops, results, expls, each_obj):
# type: (Tuple[str, ...], Tuple[bool, ...], Tuple[str, ...], Tuple[object, ...]) -> str
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
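# Rough example of the helper above on a chained comparison such as
# `assert 1 < 5 < 3`: ops is ("<", "<"), results is (True, False) and each_obj
# is (1, 5, 3). The loop stops at the first failing comparison (index 1), so
# util._reprcompare -- when a plugin provides one -- is asked to render
# `5 < 3`, and its output (if any) replaces the default explanation.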
def _call_assertion_pass(lineno, orig, expl):
# type: (int, str, str) -> None
if util._assertion_pass is not None:
util._assertion_pass(lineno, orig, expl)
def _check_if_assertion_pass_impl():
# type: () -> bool
"""Checks if any plugins implement the pytest_assertion_pass hook
in order not to generate the explanation unnecessarily (might be expensive)"""
return True if util._assertion_pass else False
UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
BINOP_MAP = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in",
ast.MatMult: "@",
}
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
"""Returns a mapping from {lineno: "assertion test expression"}"""
ret = {} # type: Dict[int, str]
depth = 0
lines = [] # type: List[str]
assert_lineno = None # type: Optional[int]
seen_lines = set() # type: Set[int]
def _write_and_reset() -> None:
nonlocal depth, lines, assert_lineno, seen_lines
assert assert_lineno is not None
ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\")
depth = 0
lines = []
assert_lineno = None
seen_lines = set()
tokens = tokenize.tokenize(io.BytesIO(src).readline)
for tp, source, (lineno, offset), _, line in tokens:
if tp == tokenize.NAME and source == "assert":
assert_lineno = lineno
elif assert_lineno is not None:
# keep track of depth for the assert-message `,` lookup
if tp == tokenize.OP and source in "([{":
depth += 1
elif tp == tokenize.OP and source in ")]}":
depth -= 1
if not lines:
lines.append(line[offset:])
seen_lines.add(lineno)
# a non-nested comma separates the expression from the message
elif depth == 0 and tp == tokenize.OP and source == ",":
# one line assert with message
if lineno in seen_lines and len(lines) == 1:
offset_in_trimmed = offset + len(lines[-1]) - len(line)
lines[-1] = lines[-1][:offset_in_trimmed]
# multi-line assert with message
elif lineno in seen_lines:
lines[-1] = lines[-1][:offset]
# multi-line assert with escaped newline before message
else:
lines.append(line[:offset])
_write_and_reset()
elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}:
_write_and_reset()
elif lines and lineno not in seen_lines:
lines.append(line)
seen_lines.add(lineno)
return ret
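# A small illustration of the mapping produced above (plain, single assert):
#   src = b"assert x == y, 'values differ'\n"
#   _get_assertion_exprs(src)  ->  {1: "x == y"}
# Only the test expression is kept; the message after the top-level comma is
# stripped, and entries are keyed by the line number of the `assert` keyword.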
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false and calls pytest_assertion_pass hook
if expression is true.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:expl_stmts: The AST statements which will be executed to get
data from the assertion. This is the code which will construct
the detailed assertion message that is used in the AssertionError
or for the pytest_assertion_pass hook.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config, source):
super().__init__()
self.module_path = module_path
self.config = config
if config is not None:
self.enable_assertion_pass_hook = config.getini(
"enable_assertion_pass_hook"
)
else:
self.enable_assertion_pass_hook = False
self.source = source
@functools.lru_cache(maxsize=1)
def _assert_expr_to_lineno(self):
return _get_assertion_exprs(self.source)
def run(self, mod: ast.Module) -> None:
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [
ast.alias("builtins", "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar"),
]
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
lineno = 1
for item in mod.body:
if (
expect_docstring
and isinstance(item, ast.Expr)
and isinstance(item.value, ast.Str)
):
doc = item.value.s
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
elif (
not isinstance(item, ast.ImportFrom)
or item.level > 0
or item.module != "__future__"
):
lineno = item.lineno
break
pos += 1
else:
lineno = item.lineno
imports = [
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod] # type: List[ast.AST]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = [] # type: List
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (
isinstance(field, ast.AST)
# Don't recurse into expressions as they can't contain
# asserts.
and not isinstance(field, ast.expr)
):
nodes.append(field)
@staticmethod
def is_rewrite_disabled(docstring):
return "PYTEST_DONT_REWRITE" in docstring
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call saferepr on the expression."""
return self.helper("_saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, name, ast.Load())
return ast.Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {} # type: Dict[str, ast.expr]
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .expl_stmts and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
if self.enable_assertion_pass_hook:
self.format_variables.append(name)
self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
from _pytest.warning_types import PytestAssertRewriteWarning
import warnings
warnings.warn_explicit(
PytestAssertRewriteWarning(
"assertion is always true, perhaps remove parentheses?"
),
category=None,
filename=fspath(self.module_path),
lineno=assert_.lineno,
)
self.statements = [] # type: List[ast.stmt]
self.variables = [] # type: List[str]
self.variable_counter = itertools.count()
if self.enable_assertion_pass_hook:
self.format_variables = [] # type: List[str]
self.stack = [] # type: List[Dict[str, ast.expr]]
self.expl_stmts = [] # type: List[ast.stmt]
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
negation = ast.UnaryOp(ast.Not(), top_condition)
if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook
msg = self.pop_format_context(ast.Str(explanation))
# Failed
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
gluestr = "\n>assert "
else:
assertmsg = ast.Str("")
gluestr = "assert "
err_explanation = ast.BinOp(ast.Str(gluestr), ast.Add(), msg)
err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation)
err_name = ast.Name("AssertionError", ast.Load())
fmt = self.helper("_format_explanation", err_msg)
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
statements_fail = []
statements_fail.extend(self.expl_stmts)
statements_fail.append(raise_)
# Passed
fmt_pass = self.helper("_format_explanation", msg)
orig = self._assert_expr_to_lineno()[assert_.lineno]
hook_call_pass = ast.Expr(
self.helper(
"_call_assertion_pass",
ast.Num(assert_.lineno),
ast.Str(orig),
fmt_pass,
)
)
# If any hooks implement assert_pass hook
hook_impl_test = ast.If(
self.helper("_check_if_assertion_pass_impl"),
self.expl_stmts + [hook_call_pass],
[],
)
statements_pass = [hook_impl_test]
# Test for assertion condition
main_test = ast.If(negation, statements_fail, statements_pass)
self.statements.append(main_test)
if self.format_variables:
variables = [
ast.Name(name, ast.Store()) for name in self.format_variables
]
clear_format = ast.Assign(variables, ast.NameConstant(None))
self.statements.append(clear_format)
else: # Original assertion rewriting
# Create failure message.
body = self.expl_stmts
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper("_format_assertmsg", assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("_format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast.Call(err_name, [fmt], [])
raise_ = ast.Raise(exc, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store()) for name in self.variables]
clear = ast.Assign(variables, ast.NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast.Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("_should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.expl_stmts
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = [] # type: List[ast.stmt]
# cond is set in a prior loop iteration below
self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa
self.expl_stmts = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast.Call(app, [expl_format], [])
self.expl_stmts.append(ast.Expr(call))
if i < levels:
cond = res # type: ast.expr
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = [] # type: List[ast.stmt]
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.expl_stmts = fail_save
expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = UNARY_MAP[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = BINOP_MAP[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "({} {} {})".format(left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call(self, call):
"""
visit `ast.Call` nodes
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "{}({})".format(func_expl, ", ".join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "{}\n{{{} = {}\n}}".format(res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp: ast.Compare):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
left_expl = "({})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
next_expl = "({})".format(next_expl)
results.append(next_res)
sym = BINOP_MAP[op.__class__]
syms.append(ast.Str(sym))
expl = "{} {} {}".format(left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper(
"_call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()),
)
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names) # type: ast.expr
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
def try_makedirs(cache_dir) -> bool:
"""Attempts to create the given directory and sub-directories exist, returns True if
successful or it already exists"""
try:
os.makedirs(fspath(cache_dir), exist_ok=True)
except (FileNotFoundError, NotADirectoryError, FileExistsError):
# One of the path components was not a directory:
# - we're in a zip file
# - it is a file
return False
except PermissionError:
return False
except OSError as e:
# as of now, EROFS doesn't have an equivalent OSError-subclass
if e.errno == errno.EROFS:
return False
raise
return True
def get_cache_dir(file_path: Path) -> Path:
"""Returns the cache directory to write .pyc files for the given .py file path"""
# Type ignored until added in next mypy release.
if sys.version_info >= (3, 8) and sys.pycache_prefix: # type: ignore
# given:
# prefix = '/tmp/pycs'
# path = '/home/user/proj/test_app.py'
# we want:
# '/tmp/pycs/home/user/proj'
return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) # type: ignore
else:
# classic pycache directory
return file_path.parent / "__pycache__"
| 38.33964
| 107
| 0.59921
|
944efdc61f41d65e230a9d586a4aa4a5d14f451a
| 5,719
|
py
|
Python
|
src/thumbor_video_engine/engines/video.py
|
fdintino/thumbor-video-engine
|
c4ed8bd191e17d9b0b606468fb22fa77de36cc05
|
[
"MIT"
] | null | null | null |
src/thumbor_video_engine/engines/video.py
|
fdintino/thumbor-video-engine
|
c4ed8bd191e17d9b0b606468fb22fa77de36cc05
|
[
"MIT"
] | null | null | null |
src/thumbor_video_engine/engines/video.py
|
fdintino/thumbor-video-engine
|
c4ed8bd191e17d9b0b606468fb22fa77de36cc05
|
[
"MIT"
] | null | null | null |
from thumbor.engines import BaseEngine
from thumbor.utils import logger
from thumbor_video_engine.utils import (
named_tmp_file, is_mp4, is_qt, is_animated, is_animated_gif)
def patch_baseengine_get_mimetype():
"""
Monkey-patch BaseEngine.get_mimetype() to recognize all mp4 files as video/mp4
"""
orig_get_mimetype = BaseEngine.get_mimetype
def get_mimetype(cls, buffer):
mimetype = orig_get_mimetype(buffer)
if mimetype is not None:
return mimetype
elif is_qt(buffer):
return 'video/quicktime'
elif is_mp4(buffer):
return 'video/mp4'
BaseEngine.get_mimetype = classmethod(get_mimetype)
patch_baseengine_get_mimetype()
class Engine(object):
"""An engine that dispatches video files to ffmpeg, and others to PIL"""
def __init__(self, context):
self.engine = None
self.context = context
self.ffmpeg_handle_animated_gif = context.config.FFMPEG_HANDLE_ANIMATED_GIF
self.ffmpeg_handle_animated_webp = True
self.use_gif_engine = context.config.FFMPEG_USE_GIFSICLE_ENGINE
@property
def image_engine(self):
self.context.modules.importer.import_item('IMAGE_ENGINE', 'Engine')
return self.context.modules.importer.image_engine(self.context)
@property
def ffmpeg_engine(self):
if not hasattr(self.context.modules, 'ffmpeg_engine'):
# Instantiate the video engine class from the config (default is
# thumbor_video_engine.engines.ffmpeg)
self.context.modules.importer.import_item('FFMPEG_ENGINE', 'Engine')
self.context.modules.ffmpeg_engine = (
self.context.modules.importer.ffmpeg_engine(self.context))
return self.context.modules.ffmpeg_engine
def get_engine(self, buffer, extension):
mime = BaseEngine.get_mimetype(buffer)
is_gif = extension == '.gif'
is_webp = extension == '.webp'
accepts_video = getattr(self.context.request, "accepts_video", False)
accepts_webp = self.context.request.accepts_webp
if is_webp and self.ffmpeg_handle_animated_webp and is_animated(buffer):
return self.ffmpeg_engine
elif is_gif and self.ffmpeg_handle_animated_gif and is_animated_gif(buffer):
if self.context.config.FFMPEG_GIF_AUTO_H265:
self.context.request.should_vary = True
if accepts_video:
logger.debug("FFMPEG_GIF_AUTO_H265 setting format to h264")
self.context.request.format = 'h265'
elif self.context.config.FFMPEG_GIF_AUTO_H264:
self.context.request.should_vary = True
if accepts_video:
logger.debug("FFMPEG_GIF_AUTO_H264 setting format to h264")
self.context.request.format = 'h264'
elif self.context.config.FFMPEG_GIF_AUTO_WEBP:
self.context.request.should_vary = True
if accepts_webp:
logger.debug("FFMPEG_GIF_AUTO_WEBP setting format to webp")
self.context.request.format = 'webp'
return self.ffmpeg_engine
elif is_gif and self.use_gif_engine:
return self.context.modules.gif_engine
elif mime.startswith('video/'):
return self.ffmpeg_engine
else:
return self.image_engine
def load(self, buffer, extension):
self.engine = self.get_engine(buffer, extension)
if self.context.request.format and not self.context.request.filters:
# RequestParameters.filters is an empty list when none are in the url,
# and ImagingHandler._write_results_to_client assumes that if
# context.request.format is set then it came from the format filter.
# Since we set the format in the engine this causes a TypeError,
# so we need to ensure that it is a string here.
self.context.request.filters = ""
logger.debug("Set engine to %s (extension %s)" % (
type(self.engine).__module__, extension))
still_frame_pos = getattr(self.context.request, 'still_position', None)
# Are we requesting a still frame?
if self.engine is self.ffmpeg_engine and still_frame_pos:
with named_tmp_file(data=buffer, suffix=extension) as src_file:
buffer = self.ffmpeg_engine.run_ffmpeg(
src_file, 'png', ['-ss', still_frame_pos, '-frames:v', '1'])
self.engine = self.image_engine
extension = '.png'
if not self.context.request.format:
self.context.request.format = 'jpg'
# Change the default extension if we're transcoding video
if self.engine is self.ffmpeg_engine and extension == ".jpg":
extension = ".mp4"
self.extension = extension
self.engine.load(buffer, extension)
def is_multiple(self):
return False
def cleanup(self):
pass
def __getattr__(self, attr):
if not self.__dict__.get('engine'):
raise AttributeError("'Engine' object has no attribute '%s'" % attr)
return getattr(self.engine, attr)
def __setattr__(self, attr, value):
if attr in ('engine', 'ffmpeg_handle_animated_gif', 'use_gif_engine'):
self.__dict__[attr] = value
elif attr in ('context', 'extension'):
self.__dict__[attr] = value
if self.engine:
setattr(self.engine, attr, value)
elif self.engine:
setattr(self.engine, attr, value)
else:
self.__dict__[attr] = value
| 41.143885
| 84
| 0.641021
|
6c9e43b2ee4138b7b8de9f5c4abcc571ffda0712
| 455
|
py
|
Python
|
packages/python/plotly/plotly/validators/layout/modebar/_remove.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/modebar/_remove.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/modebar/_remove.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class RemoveValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="remove", parent_name="layout.modebar", **kwargs):
super(RemoveValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "modebar"),
**kwargs,
)
| 35
| 85
| 0.663736
|
7d0e5a1c920820f334fa7c421fc9f97eeef3866d
| 2,310
|
py
|
Python
|
test/functional/mempool_spend_coinbase.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | 2
|
2021-10-16T06:16:02.000Z
|
2022-03-26T21:48:38.000Z
|
test/functional/mempool_spend_coinbase.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | null | null | null |
test/functional/mempool_spend_coinbase.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The XBit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import XBitTestFramework
from test_framework.blocktools import create_raw_transaction
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolSpendCoinbaseTest(XBitTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
# too immature to spend.
b = [self.nodes[0].getblockhash(n) for n in range(101, 103)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spends_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| 40.526316
| 124
| 0.719048
|
9ba5b56c164f3d7509641b5248c689e7fb53ad48
| 8,629
|
py
|
Python
|
grr/server/grr_response_server/gui/api_auth_manager_test.py
|
billstackpole/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2019-03-28T07:09:41.000Z
|
2019-03-28T07:09:41.000Z
|
grr/server/grr_response_server/gui/api_auth_manager_test.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/gui/api_auth_manager_test.py
|
gingogo/grr
|
203a0a99990a2d4004aed84a5cd822cbda2b418c
|
[
"Apache-2.0"
] | 1
|
2018-08-30T14:50:24.000Z
|
2018-08-30T14:50:24.000Z
|
#!/usr/bin/env python
"""Tests for the SimpleAPIAuthManager."""
import __builtin__
import mock
from grr_response_core.lib import flags
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import tests_pb2
from grr_response_server.authorization import groups
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router
from grr.test_lib import test_lib
class DummyAuthManagerTestApiRouter(api_call_router.ApiCallRouter):
pass
class DummyAuthManagerTestApiRouter2(api_call_router.ApiCallRouter):
pass
class DummyAuthManagerTestApiRouter3(api_call_router.ApiCallRouter):
pass
class DefaultDummyAuthManagerTestApiRouter(api_call_router.ApiCallRouter):
pass
class DummyAuthManagerTestConfigurableApiRouterParams(
rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.DummyAuthManagerTestConfigurableApiRouterParams
class DummyAuthManagerTestConfigurableApiRouter(api_call_router.ApiCallRouter):
params_type = DummyAuthManagerTestConfigurableApiRouterParams
def __init__(self, params=None):
super(DummyAuthManagerTestConfigurableApiRouter, self).__init__(
params=params)
self.params = params
class DummyGroupAccessManager(groups.GroupAccessManager):
def __init__(self):
self.authorized_groups = {}
self.positive_matches = {"u1": ["g1", "g3"]}
def AuthorizeGroup(self, group, subject):
self.authorized_groups.setdefault(subject, []).append(group)
def MemberOfAuthorizedGroup(self, username, subject):
try:
group_names = self.positive_matches[username]
except KeyError:
return False
for group_name in group_names:
if group_name in self.authorized_groups[subject]:
return True
return False
class APIAuthorizationManagerTest(test_lib.GRRBaseTest):
def setUp(self):
super(APIAuthorizationManagerTest, self).setUp()
# API ACLs are off by default, so we need to set this to something for the tests to
# exercise the functionality. Each test will supply its own ACL data. We
# also have to set up a default API router that will be used when none of
# the rules matches.
self.config_overrider = test_lib.ConfigOverrider({
"API.RouterACLConfigFile": "dummy",
"API.DefaultRouter": DefaultDummyAuthManagerTestApiRouter.__name__,
"ACL.group_access_manager_class": DummyGroupAccessManager.__name__
})
self.config_overrider.Start()
def tearDown(self):
super(APIAuthorizationManagerTest, self).tearDown()
self.config_overrider.Stop()
def testMatchesIfOneOfUsersIsMatching(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
users:
- "u1"
- "u2"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
router = auth_mgr.GetRouterForUser("u2")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
def testReturnsDefaultOnNoMatchByUser(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
users:
- "u1"
- "u2"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u4")
self.assertEqual(router.__class__, DefaultDummyAuthManagerTestApiRouter)
def testMatchesFirstRouterIfMultipleRoutersMatchByUser(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
users:
- "u1"
- "u3"
---
router: "DummyAuthManagerTestApiRouter2"
users:
- "u1"
- "u2"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
def testReturnsFirstRouterWhenMatchingByUser(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
users:
- "u1"
- "u3"
---
router: "DummyAuthManagerTestApiRouter2"
users:
- "u1"
- "u2"
---
router: "DummyAuthManagerTestApiRouter3"
users:
- "u2"
- "u4"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u2")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter2)
router = auth_mgr.GetRouterForUser("u4")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter3)
def testMatchingByGroupWorks(self):
acls = """
router: "DummyAuthManagerTestApiRouter2"
groups:
- "g1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter2)
def testMatchingByUserHasPriorityOverMatchingByGroup(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
users:
- "u1"
---
router: "DummyAuthManagerTestApiRouter2"
groups:
- "g1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
def testReturnsFirstRouterWhenMultipleMatchByGroup(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
groups:
- "g3"
---
router: "DummyAuthManagerTestApiRouter2"
groups:
- "g1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
def testReturnsFirstMatchingRouterWhenItMatchesByGroupAndOtherByUser(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
groups:
- "g3"
---
router: "DummyAuthManagerTestApiRouter2"
users:
- "u1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DummyAuthManagerTestApiRouter)
def testReturnsDefaultRouterWhenNothingMatchesByGroup(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
groups:
- "g5"
---
router: "DummyAuthManagerTestApiRouter2"
groups:
- "g6"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DefaultDummyAuthManagerTestApiRouter)
def testDefaultRouterIsReturnedIfNoConfigFileDefined(self):
"""The default router is returned if no API.RouterACLConfigFile defined."""
with test_lib.ConfigOverrider({"API.RouterACLConfigFile": ""}):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.__class__, DefaultDummyAuthManagerTestApiRouter)
def testRaisesWhenNonConfigurableRouterInitializedWithParams(self):
acls = """
router: "DummyAuthManagerTestApiRouter"
router_params:
foo: "Oh no!"
bar: 42
users:
- "u1"
"""
with self.assertRaises(
api_auth_manager.ApiCallRouterDoesNotExpectParameters):
with mock.patch.object(
__builtin__, "open", mock.mock_open(read_data=acls)):
api_auth_manager.APIAuthorizationManager()
def testConfigurableRouterIsInitializedWithoutParameters(self):
acls = """
router: "DummyAuthManagerTestConfigurableApiRouter"
users:
- "u1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.params.foo, "")
self.assertEqual(router.params.bar, 0)
def testConfigurableRouterIsInitializedWithParameters(self):
acls = """
router: "DummyAuthManagerTestConfigurableApiRouter"
router_params:
foo: "Oh no!"
bar: 42
users:
- "u1"
"""
with mock.patch.object(__builtin__, "open", mock.mock_open(read_data=acls)):
auth_mgr = api_auth_manager.APIAuthorizationManager()
router = auth_mgr.GetRouterForUser("u1")
self.assertEqual(router.params.foo, "Oh no!")
self.assertEqual(router.params.bar, 42)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 28.291803
| 80
| 0.758025
|
6d02f9056bde80577ea7eb5e2f256a1849939736
| 71
|
py
|
Python
|
erds_server/erds_server/local_settings.py
|
sbsimo/gpm-accumul
|
e5f3d0f6b5dc079d22be0422eb924d1cf87216b7
|
[
"MIT"
] | null | null | null |
erds_server/erds_server/local_settings.py
|
sbsimo/gpm-accumul
|
e5f3d0f6b5dc079d22be0422eb924d1cf87216b7
|
[
"MIT"
] | 5
|
2018-09-25T08:32:30.000Z
|
2019-10-01T06:51:33.000Z
|
erds_server/erds_server/local_settings.py
|
sbsimo/gpm-accumul
|
e5f3d0f6b5dc079d22be0422eb924d1cf87216b7
|
[
"MIT"
] | 2
|
2018-01-30T16:14:27.000Z
|
2020-07-29T07:06:58.000Z
|
from erds_server.settings import *
# add here all the local settings
| 14.2
| 34
| 0.774648
|
aa2c881b2dc3a9cc2fd4bc82c52456e8ae7f9e9e
| 71,193
|
py
|
Python
|
src/twisted/internet/defer.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 32
|
2019-11-14T07:49:33.000Z
|
2022-02-16T00:49:22.000Z
|
src/twisted/internet/defer.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 9
|
2019-09-06T18:21:59.000Z
|
2022-01-13T03:04:11.000Z
|
src/twisted/internet/defer.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 16
|
2019-06-25T13:26:43.000Z
|
2022-03-07T07:29:12.000Z
|
# -*- test-case-name: twisted.test.test_defer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for results that aren't immediately available.
Maintainer: Glyph Lefkowitz
@var _NO_RESULT: The result used to represent the fact that there is no
result. B{Never ever ever use this as an actual result for a Deferred}. You
have been warned.
@var _CONTINUE: A marker left in L{Deferred.callback}s to indicate a Deferred
chain. Always accompanied by a Deferred instance in the args tuple pointing
at the Deferred which is chained to the Deferred which has this marker.
"""
from __future__ import division, absolute_import, print_function
import attr
import traceback
import types
import warnings
from sys import exc_info, version_info
from functools import wraps
from incremental import Version
# Twisted imports
from twisted.python.compat import cmp, comparable
from twisted.python import lockfile, failure
from twisted.logger import Logger
from twisted.python.deprecate import warnAboutFunction, deprecated
from twisted.python._oldstyle import _oldStyle
log = Logger()
class AlreadyCalledError(Exception):
pass
class CancelledError(Exception):
"""
This error is raised by default when a L{Deferred} is cancelled.
"""
class TimeoutError(Exception):
"""
This error is raised by default when a L{Deferred} times out.
"""
def logError(err):
"""
Log and return failure.
This method can be used as an errback that passes the failure on to the
next errback unmodified. Note that if this is the last errback, and the
deferred gets garbage collected after this errback has been called,
the clean up code logs it again.
"""
log.failure(None, err)
return err
def succeed(result):
"""
Return a L{Deferred} that has already had C{.callback(result)} called.
This is useful when you're writing synchronous code to an
asynchronous interface: i.e., some code is calling you expecting a
L{Deferred} result, but you don't actually need to do anything
asynchronous. Just return C{defer.succeed(theResult)}.
See L{fail} for a version of this function that uses a failing
L{Deferred} rather than a successful one.
@param result: The result to give to the Deferred's 'callback'
method.
@rtype: L{Deferred}
"""
d = Deferred()
d.callback(result)
return d
def fail(result=None):
"""
Return a L{Deferred} that has already had C{.errback(result)} called.
See L{succeed}'s docstring for rationale.
@param result: The same argument that L{Deferred.errback} takes.
@raise NoCurrentExceptionError: If C{result} is L{None} but there is no
current exception state.
@rtype: L{Deferred}
"""
d = Deferred()
d.errback(result)
return d
def execute(callable, *args, **kw):
"""
Create a L{Deferred} from a callable and arguments.
Call the given function with the given arguments. Return a L{Deferred}
which has been fired with its callback as the result of that invocation
or its C{errback} with a L{Failure} for the exception thrown.
"""
try:
result = callable(*args, **kw)
except:
return fail()
else:
return succeed(result)
def maybeDeferred(f, *args, **kw):
"""
Invoke a function that may or may not return a L{Deferred}.
Call the given function with the given arguments. If the returned
object is a L{Deferred}, return it. If the returned object is a L{Failure},
wrap it with L{fail} and return it. Otherwise, wrap it in L{succeed} and
return it. If an exception is raised, convert it to a L{Failure}, wrap it
in L{fail}, and then return it.
@type f: Any callable
@param f: The callable to invoke
@param args: The arguments to pass to C{f}
@param kw: The keyword arguments to pass to C{f}
@rtype: L{Deferred}
@return: The result of the function call, wrapped in a L{Deferred} if
necessary.
"""
try:
result = f(*args, **kw)
except:
return fail(failure.Failure(captureVars=Deferred.debug))
if isinstance(result, Deferred):
return result
elif isinstance(result, failure.Failure):
return fail(result)
else:
return succeed(result)
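# A minimal usage sketch of maybeDeferred (the function name below is
# illustrative only):
#
#     def mightBlock():
#         return 42                      # plain value -> wrapped by succeed()
#
#     d = maybeDeferred(mightBlock)
#     d.addCallback(lambda result: log.info("got {r}", r=result))
#
# The same call site keeps working if mightBlock() later returns a Deferred or
# raises, since exceptions and Failures are routed to the errback chain.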
@deprecated(Version('Twisted', 17, 1, 0),
replacement='twisted.internet.defer.Deferred.addTimeout')
def timeout(deferred):
deferred.errback(failure.Failure(TimeoutError("Callback timed out")))
def passthru(arg):
return arg
def setDebugging(on):
"""
Enable or disable L{Deferred} debugging.
When debugging is on, the call stacks from creation and invocation are
recorded, and added to any L{AlreadyCalledError}s we raise.
"""
Deferred.debug=bool(on)
def getDebugging():
"""
Determine whether L{Deferred} debugging is enabled.
"""
return Deferred.debug
# See module docstring.
_NO_RESULT = object()
_CONTINUE = object()
@_oldStyle
class Deferred:
"""
This is a callback which will be put off until later.
Why do we want this? Well, in cases where a function in a threaded
program would block until it gets a result, for Twisted it should
not block. Instead, it should return a L{Deferred}.
This can be implemented for protocols that run over the network by
writing an asynchronous protocol for L{twisted.internet}. For methods
that come from outside packages that are not under our control, we use
threads (see for example L{twisted.enterprise.adbapi}).
For more information about Deferreds, see doc/core/howto/defer.html or
U{http://twistedmatrix.com/documents/current/core/howto/defer.html}
When creating a Deferred, you may provide a canceller function, which
will be called by d.cancel() to let you do any clean-up necessary if the
user decides not to wait for the deferred to complete.
@ivar called: A flag which is C{False} until either C{callback} or
C{errback} is called and afterwards always C{True}.
@type called: L{bool}
@ivar paused: A counter of how many unmatched C{pause} calls have been made
on this instance.
@type paused: L{int}
@ivar _suppressAlreadyCalled: A flag used by the cancellation mechanism
which is C{True} if the Deferred has no canceller and has been
cancelled, C{False} otherwise. If C{True}, it can be expected that
C{callback} or C{errback} will eventually be called and the result
should be silently discarded.
@type _suppressAlreadyCalled: L{bool}
@ivar _runningCallbacks: A flag which is C{True} while this instance is
executing its callback chain, used to stop recursive execution of
L{_runCallbacks}
@type _runningCallbacks: L{bool}
@ivar _chainedTo: If this L{Deferred} is waiting for the result of another
L{Deferred}, this is a reference to the other Deferred. Otherwise,
L{None}.
"""
called = False
paused = False
_debugInfo = None
_suppressAlreadyCalled = False
# Are we currently running a user-installed callback? Meant to prevent
# recursive running of callbacks when a reentrant call to add a callback is
# used.
_runningCallbacks = False
# Keep this class attribute for now, for compatibility with code that
# sets it directly.
debug = False
_chainedTo = None
def __init__(self, canceller=None):
"""
Initialize a L{Deferred}.
@param canceller: a callable used to stop the pending operation
scheduled by this L{Deferred} when L{Deferred.cancel} is
invoked. The canceller will be passed the deferred whose
cancelation is requested (i.e., self).
If a canceller is not given, or does not invoke its argument's
C{callback} or C{errback} method, L{Deferred.cancel} will
invoke L{Deferred.errback} with a L{CancelledError}.
Note that if a canceller is not given, C{callback} or
C{errback} may still be invoked exactly once, even though
defer.py will have already invoked C{errback}, as described
above. This allows clients of code which returns a L{Deferred}
to cancel it without requiring the L{Deferred} instantiator to
provide any specific implementation support for cancellation.
New in 10.1.
@type canceller: a 1-argument callable which takes a L{Deferred}. The
return result is ignored.
"""
self.callbacks = []
self._canceller = canceller
if self.debug:
self._debugInfo = DebugInfo()
self._debugInfo.creator = traceback.format_stack()[:-1]
def addCallbacks(self, callback, errback=None,
callbackArgs=None, callbackKeywords=None,
errbackArgs=None, errbackKeywords=None):
"""
Add a pair of callbacks (success and error) to this L{Deferred}.
These will be executed when the 'master' callback is run.
@return: C{self}.
@rtype: a L{Deferred}
"""
assert callable(callback)
assert errback is None or callable(errback)
cbs = ((callback, callbackArgs, callbackKeywords),
(errback or (passthru), errbackArgs, errbackKeywords))
self.callbacks.append(cbs)
if self.called:
self._runCallbacks()
return self
def addCallback(self, callback, *args, **kw):
"""
Convenience method for adding just a callback.
See L{addCallbacks}.
"""
return self.addCallbacks(callback, callbackArgs=args,
callbackKeywords=kw)
def addErrback(self, errback, *args, **kw):
"""
Convenience method for adding just an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(passthru, errback,
errbackArgs=args,
errbackKeywords=kw)
def addBoth(self, callback, *args, **kw):
"""
Convenience method for adding a single callable as both a callback
and an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(callback, callback,
callbackArgs=args, errbackArgs=args,
callbackKeywords=kw, errbackKeywords=kw)
def addTimeout(self, timeout, clock, onTimeoutCancel=None):
"""
Time out this L{Deferred} by scheduling it to be cancelled after
C{timeout} seconds.
The timeout encompasses all the callbacks and errbacks added to this
L{defer.Deferred} before the call to L{addTimeout}, and none added
after the call.
If this L{Deferred} gets timed out, it errbacks with a L{TimeoutError},
unless a cancelable function was passed to its initialization or unless
a different C{onTimeoutCancel} callable is provided.
@param timeout: number of seconds to wait before timing out this
L{Deferred}
@type timeout: L{int}
@param clock: The object which will be used to schedule the timeout.
@type clock: L{twisted.internet.interfaces.IReactorTime}
@param onTimeoutCancel: A callable which is called immediately after
this L{Deferred} times out, and not if this L{Deferred} is
otherwise cancelled before the timeout. It takes an arbitrary
value, which is the value of this L{Deferred} at that exact point
in time (probably a L{CancelledError} L{Failure}), and the
C{timeout}. The default callable (if none is provided) will
translate a L{CancelledError} L{Failure} into a L{TimeoutError}.
@type onTimeoutCancel: L{callable}
@return: C{self}.
@rtype: a L{Deferred}
@since: 16.5
"""
timedOut = [False]
def timeItOut():
timedOut[0] = True
self.cancel()
delayedCall = clock.callLater(timeout, timeItOut)
def convertCancelled(value):
# if C{deferred} was timed out, call the translation function,
# if provided, otherwise just use L{_cancelledToTimedOutError}
if timedOut[0]:
toCall = onTimeoutCancel or _cancelledToTimedOutError
return toCall(value, timeout)
return value
self.addBoth(convertCancelled)
def cancelTimeout(result):
# stop the pending call to cancel the deferred if it's been fired
if delayedCall.active():
delayedCall.cancel()
return result
self.addBoth(cancelTimeout)
return self
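# Illustrative use of addTimeout (the slow-operation name is hypothetical):
#
#     from twisted.internet import reactor
#     d = someSlowOperation()
#     d.addTimeout(30, reactor)
#     d.addErrback(lambda f: f.trap(TimeoutError))
#
# Callbacks added before the addTimeout call are covered by the timeout;
# callbacks/errbacks added afterwards (such as the errback above) see the
# resulting TimeoutError failure.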
def chainDeferred(self, d):
"""
Chain another L{Deferred} to this L{Deferred}.
This method adds callbacks to this L{Deferred} to call C{d}'s callback
or errback, as appropriate. It is merely a shorthand way of performing
the following::
self.addCallbacks(d.callback, d.errback)
When you chain a deferred d2 to another deferred d1 with
d1.chainDeferred(d2), you are making d2 participate in the callback
chain of d1. Thus any event that fires d1 will also fire d2.
However, the converse is B{not} true; if d2 is fired d1 will not be
affected.
Note that unlike the case where chaining is caused by a L{Deferred}
being returned from a callback, it is possible to cause the call
stack size limit to be exceeded by chaining many L{Deferred}s
together with C{chainDeferred}.
@return: C{self}.
@rtype: a L{Deferred}
"""
d._chainedTo = self
return self.addCallbacks(d.callback, d.errback)
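# Sketch of the chaining described above:
#
#     d1 = Deferred()
#     d2 = Deferred()
#     d1.chainDeferred(d2)
#     d2.addCallback(lambda r: log.info("chained result: {r}", r=r))
#     d1.callback("done")    # fires d1, which in turn fires d2 with "done"
#
# Firing d2 directly would leave d1 untouched, as noted in the docstring.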
def callback(self, result):
"""
Run all success callbacks that have been added to this L{Deferred}.
Each callback will have its result passed as the first argument to
the next; this way, the callbacks act as a 'processing chain'. If
the success-callback returns a L{Failure} or raises an L{Exception},
processing will continue on the *error* callback chain. If a
callback (or errback) returns another L{Deferred}, this L{Deferred}
will be chained to it (and further callbacks will not run until that
L{Deferred} has a result).
An instance of L{Deferred} may only have either L{callback} or
L{errback} called on it, and only once.
@param result: The object which will be passed to the first callback
added to this L{Deferred} (via L{addCallback}).
@raise AlreadyCalledError: If L{callback} or L{errback} has already been
called on this L{Deferred}.
"""
assert not isinstance(result, Deferred)
self._startRunCallbacks(result)
def errback(self, fail=None):
"""
Run all error callbacks that have been added to this L{Deferred}.
Each callback will have its result passed as the first
argument to the next; this way, the callbacks act as a
'processing chain'. Also, if the error-callback returns a non-Failure
or doesn't raise an L{Exception}, processing will continue on the
*success*-callback chain.
If the argument that's passed to me is not a L{failure.Failure} instance,
it will be embedded in one. If no argument is passed, a
L{failure.Failure} instance will be created based on the current
traceback stack.
Passing a string as `fail' is deprecated, and will be punished with
a warning message.
An instance of L{Deferred} may only have either L{callback} or
L{errback} called on it, and only once.
@param fail: The L{Failure} object which will be passed to the first
errback added to this L{Deferred} (via L{addErrback}).
Alternatively, a L{Exception} instance from which a L{Failure} will
be constructed (with no traceback) or L{None} to create a L{Failure}
instance from the current exception state (with a traceback).
@raise AlreadyCalledError: If L{callback} or L{errback} has already been
called on this L{Deferred}.
@raise NoCurrentExceptionError: If C{fail} is L{None} but there is
no current exception state.
"""
if fail is None:
fail = failure.Failure(captureVars=self.debug)
elif not isinstance(fail, failure.Failure):
fail = failure.Failure(fail)
self._startRunCallbacks(fail)
def pause(self):
"""
Stop processing on a L{Deferred} until L{unpause}() is called.
"""
self.paused = self.paused + 1
def unpause(self):
"""
Process all callbacks made since L{pause}() was called.
"""
self.paused = self.paused - 1
if self.paused:
return
if self.called:
self._runCallbacks()
def cancel(self):
"""
Cancel this L{Deferred}.
If the L{Deferred} has not yet had its C{errback} or C{callback} method
invoked, call the canceller function provided to the constructor. If
that function does not invoke C{callback} or C{errback}, or if no
canceller function was provided, errback with L{CancelledError}.
If this L{Deferred} is waiting on another L{Deferred}, forward the
cancellation to the other L{Deferred}.
"""
if not self.called:
canceller = self._canceller
if canceller:
canceller(self)
else:
# Arrange to eat the callback that will eventually be fired
# since there was no real canceller.
self._suppressAlreadyCalled = True
if not self.called:
# There was no canceller, or the canceller didn't call
# callback or errback.
self.errback(failure.Failure(CancelledError()))
elif isinstance(self.result, Deferred):
# Waiting for another deferred -- cancel it instead.
self.result.cancel()
def _startRunCallbacks(self, result):
if self.called:
if self._suppressAlreadyCalled:
self._suppressAlreadyCalled = False
return
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
extra = "\n" + self._debugInfo._getDebugTracebacks()
raise AlreadyCalledError(extra)
raise AlreadyCalledError
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
self._debugInfo.invoker = traceback.format_stack()[:-2]
self.called = True
self.result = result
self._runCallbacks()
def _continuation(self):
"""
Build a tuple of callback and errback with L{_CONTINUE}.
"""
return ((_CONTINUE, (self,), None),
(_CONTINUE, (self,), None))
def _runCallbacks(self):
"""
Run the chain of callbacks once a result is available.
This consists of a simple loop over all of the callbacks, calling each
with the current result and making the current result equal to the
return value (or raised exception) of that call.
If L{_runningCallbacks} is true, this loop won't run at all, since
it is already running above us on the call stack. If C{self.paused} is
true, the loop also won't run, because that's what it means to be
paused.
The loop will terminate before processing all of the callbacks if a
L{Deferred} without a result is encountered.
If a L{Deferred} I{with} a result is encountered, that result is taken
and the loop proceeds.
@note: The implementation is complicated slightly by the fact that
chaining (associating two L{Deferred}s with each other such that one
will wait for the result of the other, as happens when a Deferred is
returned from a callback on another L{Deferred}) is supported
iteratively rather than recursively, to avoid running out of stack
frames when processing long chains.
"""
if self._runningCallbacks:
# Don't recursively run callbacks
return
# Keep track of all the Deferreds encountered while propagating results
# up a chain. The way a Deferred gets onto this stack is by having
# added its _continuation() to the callbacks list of a second Deferred
# and then that second Deferred being fired. ie, if ever had _chainedTo
# set to something other than None, you might end up on this stack.
chain = [self]
while chain:
current = chain[-1]
if current.paused:
# This Deferred isn't going to produce a result at all. All the
# Deferreds up the chain waiting on it will just have to...
# wait.
return
finished = True
current._chainedTo = None
while current.callbacks:
item = current.callbacks.pop(0)
callback, args, kw = item[
isinstance(current.result, failure.Failure)]
args = args or ()
kw = kw or {}
# Avoid recursion if we can.
if callback is _CONTINUE:
# Give the waiting Deferred our current result and then
# forget about that result ourselves.
chainee = args[0]
chainee.result = current.result
current.result = None
# Making sure to update _debugInfo
if current._debugInfo is not None:
current._debugInfo.failResult = None
chainee.paused -= 1
chain.append(chainee)
# Delay cleaning this Deferred and popping it from the chain
# until after we've dealt with chainee.
finished = False
break
try:
current._runningCallbacks = True
try:
current.result = callback(current.result, *args, **kw)
if current.result is current:
warnAboutFunction(
callback,
"Callback returned the Deferred "
"it was attached to; this breaks the "
"callback chain and will raise an "
"exception in the future.")
finally:
current._runningCallbacks = False
except:
# Including full frame information in the Failure is quite
# expensive, so we avoid it unless self.debug is set.
current.result = failure.Failure(captureVars=self.debug)
else:
if isinstance(current.result, Deferred):
# The result is another Deferred. If it has a result,
# we can take it and keep going.
resultResult = getattr(current.result, 'result', _NO_RESULT)
if resultResult is _NO_RESULT or isinstance(resultResult, Deferred) or current.result.paused:
# Nope, it didn't. Pause and chain.
current.pause()
current._chainedTo = current.result
# Note: current.result has no result, so it's not
# running its callbacks right now. Therefore we can
# append to the callbacks list directly instead of
# using addCallbacks.
current.result.callbacks.append(current._continuation())
break
else:
# Yep, it did. Steal it.
current.result.result = None
# Make sure _debugInfo's failure state is updated.
if current.result._debugInfo is not None:
current.result._debugInfo.failResult = None
current.result = resultResult
if finished:
# As much of the callback chain - perhaps all of it - as can be
# processed right now has been. The current Deferred is waiting on
# another Deferred or for more callbacks. Before finishing with it,
# make sure its _debugInfo is in the proper state.
if isinstance(current.result, failure.Failure):
# Stash the Failure in the _debugInfo for unhandled error
# reporting.
current.result.cleanFailure()
if current._debugInfo is None:
current._debugInfo = DebugInfo()
current._debugInfo.failResult = current.result
else:
# Clear out any Failure in the _debugInfo, since the result
# is no longer a Failure.
if current._debugInfo is not None:
current._debugInfo.failResult = None
# This Deferred is done, pop it from the chain and move back up
# to the Deferred which supplied us with our result.
chain.pop()
def __str__(self):
"""
Return a string representation of this C{Deferred}.
"""
cname = self.__class__.__name__
result = getattr(self, 'result', _NO_RESULT)
myID = id(self)
if self._chainedTo is not None:
result = ' waiting on Deferred at 0x%x' % (id(self._chainedTo),)
elif result is _NO_RESULT:
result = ''
else:
result = ' current result: %r' % (result,)
return "<%s at 0x%x%s>" % (cname, myID, result)
__repr__ = __str__
def __iter__(self):
return self
@failure._extraneous
def send(self, value=None):
if self.paused:
# If we're paused, we have no result to give
return self
result = getattr(self, 'result', _NO_RESULT)
if result is _NO_RESULT:
return self
if isinstance(result, failure.Failure):
# Clear the failure on debugInfo so it doesn't raise "unhandled
# exception"
self._debugInfo.failResult = None
result.value.__failure__ = result
raise result.value
else:
raise StopIteration(result)
# For PEP-492 support (async/await)
__await__ = __iter__
__next__ = send
def asFuture(self, loop):
"""
Adapt a L{Deferred} into a L{asyncio.Future} which is bound to C{loop}.
@note: converting a L{Deferred} to an L{asyncio.Future} consumes both
its result and its errors, so this method implicitly converts
C{self} into a L{Deferred} firing with L{None}, regardless of what
its result previously would have been.
@since: Twisted 17.5.0
@param loop: The asyncio event loop to bind the L{asyncio.Future} to.
@type loop: L{asyncio.AbstractEventLoop} or similar
@return: A Future which will fire when the Deferred fires.
@rtype: L{asyncio.Future}
"""
try:
createFuture = loop.create_future
except AttributeError:
from asyncio import Future
def createFuture():
return Future(loop=loop)
future = createFuture()
def checkCancel(futureAgain):
if futureAgain.cancelled():
self.cancel()
def maybeFail(failure):
if not future.cancelled():
future.set_exception(failure.value)
def maybeSucceed(result):
if not future.cancelled():
future.set_result(result)
self.addCallbacks(maybeSucceed, maybeFail)
future.add_done_callback(checkCancel)
return future
@classmethod
def fromFuture(cls, future):
"""
Adapt an L{asyncio.Future} to a L{Deferred}.
@note: This creates a L{Deferred} from a L{asyncio.Future}, I{not} from
a C{coroutine}; in other words, you will need to call
L{asyncio.ensure_future},
L{asyncio.loop.create_task} or create an
L{asyncio.Task} yourself to get from a C{coroutine} to a
L{asyncio.Future} if what you have is an awaitable coroutine and
not a L{asyncio.Future}. (The length of this list of techniques is
exactly why we have left it to the caller!)
@since: Twisted 17.5.0
@param future: The Future to adapt.
@type future: L{asyncio.Future}
@return: A Deferred which will fire when the Future fires.
@rtype: L{Deferred}
"""
def adapt(result):
try:
extracted = result.result()
except:
extracted = failure.Failure()
adapt.actual.callback(extracted)
futureCancel = object()
def cancel(reself):
future.cancel()
reself.callback(futureCancel)
self = cls(cancel)
adapt.actual = self
def uncancel(result):
if result is futureCancel:
adapt.actual = Deferred()
return adapt.actual
return result
self.addCallback(uncancel)
future.add_done_callback(adapt)
return self
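# Illustrative sketch (editorial addition, not part of the original module):
# a minimal callback/errback chain on the Deferred class defined above.
# Outside this module the class is imported with
# ``from twisted.internet.defer import Deferred``.
def _example_callback_chain():
    d = Deferred()
    # Each callback receives the result returned by the previous one.
    d.addCallback(lambda result: result + 1)
    d.addCallback(lambda result: result * 2)
    # The errback side is skipped while the chain is succeeding; it would only
    # run if a callback raised or returned a Failure.
    d.addErrback(lambda f: f)
    results = []
    d.addCallback(results.append)
    d.callback(1)        # fires the chain synchronously: (1 + 1) * 2 == 4
    return results[0]    # == 4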
def _cancelledToTimedOutError(value, timeout):
"""
A default translation function that translates L{Failure}s that are
L{CancelledError}s to L{TimeoutError}s.
@param value: Anything
@type value: Anything
@param timeout: The timeout
@type timeout: L{int}
@rtype: C{value}
@raise: L{TimeoutError}
@since: 16.5
"""
if isinstance(value, failure.Failure):
value.trap(CancelledError)
raise TimeoutError(timeout, "Deferred")
return value
def ensureDeferred(coro):
"""
Schedule the execution of a coroutine that awaits/yields from L{Deferred}s,
wrapping it in a L{Deferred} that will fire on success/failure of the
coroutine. If a Deferred is passed to this function, it will be returned
directly (mimicking C{asyncio}'s C{ensure_future} function).
Coroutine functions return a coroutine object, similar to how generators
work. This function turns that coroutine into a Deferred, meaning that it
can be used in regular Twisted code. For example::
import treq
from twisted.internet.defer import ensureDeferred
from twisted.internet.task import react
async def crawl(pages):
results = {}
for page in pages:
results[page] = await treq.content(await treq.get(page))
return results
def main(reactor):
pages = [
"http://localhost:8080"
]
d = ensureDeferred(crawl(pages))
d.addCallback(print)
return d
react(main)
@param coro: The coroutine object to schedule, or a L{Deferred}.
@type coro: A Python 3.5+ C{async def} C{coroutine}, a Python 3.4+
C{yield from} using L{types.GeneratorType}, or a L{Deferred}.
@rtype: L{Deferred}
"""
from types import GeneratorType
if version_info >= (3, 4, 0):
from asyncio import iscoroutine
if iscoroutine(coro) or isinstance(coro, GeneratorType):
return _cancellableInlineCallbacks(coro)
if not isinstance(coro, Deferred):
raise ValueError("%r is not a coroutine or a Deferred" % (coro,))
# Must be a Deferred
return coro
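# Illustrative sketch (editorial addition): wrapping a native coroutine with
# ensureDeferred defined above. Requires Python 3.5+ for the ``async def``
# syntax; awaiting a Deferred suspends the coroutine until it has a result.
def _example_ensure_deferred():
    async def double(d):
        value = await d
        return value * 2

    source = Deferred()
    wrapper = ensureDeferred(double(source))   # a Deferred for the coroutine
    wrapper.addCallback(print)                 # prints 84 once source fires
    source.callback(42)
    return wrapper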
@_oldStyle
class DebugInfo:
"""
Deferred debug helper.
"""
failResult = None
def _getDebugTracebacks(self):
info = ''
if hasattr(self, "creator"):
info += " C: Deferred was created:\n C:"
info += "".join(self.creator).rstrip().replace("\n", "\n C:")
info += "\n"
if hasattr(self, "invoker"):
info += " I: First Invoker was:\n I:"
info += "".join(self.invoker).rstrip().replace("\n", "\n I:")
info += "\n"
return info
def __del__(self):
"""
Print tracebacks and die.
If the *last* (and I do mean *last*) callback leaves me in an error
state, print a traceback (if said errback is a L{Failure}).
"""
if self.failResult is not None:
# Note: this is two separate messages for compatibility with
# earlier tests; arguably it should be a single error message.
log.critical("Unhandled error in Deferred:",
isError=True)
debugInfo = self._getDebugTracebacks()
if debugInfo:
format = "(debug: {debugInfo})"
else:
format = None
log.failure(format,
self.failResult,
debugInfo=debugInfo)
@comparable
class FirstError(Exception):
"""
First error to occur in a L{DeferredList} if C{fireOnOneErrback} is set.
@ivar subFailure: The L{Failure} that occurred.
@type subFailure: L{Failure}
@ivar index: The index of the L{Deferred} in the L{DeferredList} where
it happened.
@type index: L{int}
"""
def __init__(self, failure, index):
Exception.__init__(self, failure, index)
self.subFailure = failure
self.index = index
def __repr__(self):
"""
The I{repr} of L{FirstError} instances includes the repr of the
wrapped failure's exception and the index of the L{FirstError}.
"""
return 'FirstError[#%d, %r]' % (self.index, self.subFailure.value)
def __str__(self):
"""
The I{str} of L{FirstError} instances includes the I{str} of the
entire wrapped failure (including its traceback and exception) and
the index of the L{FirstError}.
"""
return 'FirstError[#%d, %s]' % (self.index, self.subFailure)
def __cmp__(self, other):
"""
Comparison between L{FirstError} and other L{FirstError} instances
is defined as the comparison of the index and sub-failure of each
instance. L{FirstError} instances don't compare equal to anything
that isn't a L{FirstError} instance.
@since: 8.2
"""
if isinstance(other, FirstError):
return cmp(
(self.index, self.subFailure),
(other.index, other.subFailure))
return -1
class DeferredList(Deferred):
"""
L{DeferredList} is a tool for collecting the results of several Deferreds.
This tracks a list of L{Deferred}s for their results, and makes a single
callback when they have all completed. By default, the ultimate result is a
list of (success, result) tuples, 'success' being a boolean.
L{DeferredList} exposes the same API that L{Deferred} does, so callbacks and
errbacks can be added to it in the same way.
L{DeferredList} is implemented by adding callbacks and errbacks to each
L{Deferred} in the list passed to it. This means callbacks and errbacks
added to the Deferreds before they are passed to L{DeferredList} will change
the result that L{DeferredList} sees (i.e., L{DeferredList} is not special).
Callbacks and errbacks can also be added to the Deferreds after they are
passed to L{DeferredList} and L{DeferredList} may change the result that
they see.
See the documentation for the C{__init__} arguments for more information.
@ivar _deferredList: The L{list} of L{Deferred}s to track.
"""
fireOnOneCallback = False
fireOnOneErrback = False
def __init__(self, deferredList, fireOnOneCallback=False,
fireOnOneErrback=False, consumeErrors=False):
"""
Initialize a DeferredList.
@param deferredList: The list of deferreds to track.
@type deferredList: L{list} of L{Deferred}s
@param fireOnOneCallback: (keyword param) a flag indicating that this
L{DeferredList} will fire when the first L{Deferred} in
C{deferredList} fires with a non-failure result without waiting for
any of the other Deferreds. When this flag is set, the DeferredList
will fire with a two-tuple: the first element is the result of the
Deferred which fired; the second element is the index in
C{deferredList} of that Deferred.
@type fireOnOneCallback: L{bool}
@param fireOnOneErrback: (keyword param) a flag indicating that this
L{DeferredList} will fire when the first L{Deferred} in
C{deferredList} fires with a failure result without waiting for any
of the other Deferreds. When this flag is set, if a Deferred in the
list errbacks, the DeferredList will errback with a L{FirstError}
failure wrapping the failure of that Deferred.
@type fireOnOneErrback: L{bool}
@param consumeErrors: (keyword param) a flag indicating that failures in
any of the included L{Deferred}s should not be propagated to
errbacks added to the individual L{Deferred}s after this
L{DeferredList} is constructed. After constructing the
L{DeferredList}, any errors in the individual L{Deferred}s will be
converted to a callback result of L{None}. This is useful to
prevent spurious 'Unhandled error in Deferred' messages from being
logged. This does not prevent C{fireOnOneErrback} from working.
@type consumeErrors: L{bool}
"""
self._deferredList = list(deferredList)
self.resultList = [None] * len(self._deferredList)
Deferred.__init__(self)
if len(self._deferredList) == 0 and not fireOnOneCallback:
self.callback(self.resultList)
# These flags need to be set *before* attaching callbacks to the
# deferreds, because the callbacks use these flags, and will run
# synchronously if any of the deferreds are already fired.
self.fireOnOneCallback = fireOnOneCallback
self.fireOnOneErrback = fireOnOneErrback
self.consumeErrors = consumeErrors
self.finishedCount = 0
index = 0
for deferred in self._deferredList:
deferred.addCallbacks(self._cbDeferred, self._cbDeferred,
callbackArgs=(index,SUCCESS),
errbackArgs=(index,FAILURE))
index = index + 1
def _cbDeferred(self, result, index, succeeded):
"""
(internal) Callback for when one of my deferreds fires.
"""
self.resultList[index] = (succeeded, result)
self.finishedCount += 1
if not self.called:
if succeeded == SUCCESS and self.fireOnOneCallback:
self.callback((result, index))
elif succeeded == FAILURE and self.fireOnOneErrback:
self.errback(failure.Failure(FirstError(result, index)))
elif self.finishedCount == len(self.resultList):
self.callback(self.resultList)
if succeeded == FAILURE and self.consumeErrors:
result = None
return result
def cancel(self):
"""
Cancel this L{DeferredList}.
If the L{DeferredList} hasn't fired yet, cancel every L{Deferred} in
the list.
If the L{DeferredList} has fired, including the case where the
C{fireOnOneCallback}/C{fireOnOneErrback} flag is set and the
L{DeferredList} fires because one L{Deferred} in the list fires with a
non-failure/failure result, do nothing in the C{cancel} method.
"""
if not self.called:
for deferred in self._deferredList:
try:
deferred.cancel()
except:
log.failure(
"Exception raised from user supplied canceller"
)
def _parseDListResult(l, fireOnOneErrback=False):
if __debug__:
for success, value in l:
assert success
return [x[1] for x in l]
def gatherResults(deferredList, consumeErrors=False):
"""
Returns, via a L{Deferred}, a list with the results of the given
L{Deferred}s - in effect, a "join" of multiple deferred operations.
The returned L{Deferred} will fire when I{all} of the provided L{Deferred}s
have fired, or when any one of them has failed.
The returned L{Deferred} can be cancelled by calling its C{cancel} method;
all the L{Deferred}s in the list will then be cancelled.
This differs from L{DeferredList} in that you don't need to parse
the result for success/failure.
@type deferredList: L{list} of L{Deferred}s
@param consumeErrors: (keyword param) a flag, defaulting to False,
indicating that failures in any of the given L{Deferred}s should not be
propagated to errbacks added to the individual L{Deferred}s after this
L{gatherResults} invocation. Any such errors in the individual
L{Deferred}s will be converted to a callback result of L{None}. This
is useful to prevent spurious 'Unhandled error in Deferred' messages
from being logged. This parameter is available since 11.1.0.
@type consumeErrors: L{bool}
"""
d = DeferredList(deferredList, fireOnOneErrback=True,
consumeErrors=consumeErrors)
d.addCallback(_parseDListResult)
return d
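# Illustrative sketch (editorial addition): collecting several Deferreds with
# DeferredList and gatherResults as documented above; only names defined in
# this module are used.
def _example_gather_results():
    first, second = Deferred(), Deferred()
    # DeferredList fires with a list of (success, result) tuples.
    dl = DeferredList([first, second])
    dl.addCallback(print)          # [(True, 'one'), (True, 'two')]

    # gatherResults fires with just the results, or errbacks on the first
    # failure (wrapped in FirstError).
    third, fourth = Deferred(), Deferred()
    g = gatherResults([third, fourth])
    g.addCallback(print)           # ['three', 'four']

    first.callback('one')
    second.callback('two')
    third.callback('three')
    fourth.callback('four')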
# Constants for use with DeferredList
SUCCESS = True
FAILURE = False
## deferredGenerator
@_oldStyle
class waitForDeferred:
"""
See L{deferredGenerator}.
"""
def __init__(self, d):
warnings.warn(
"twisted.internet.defer.waitForDeferred was deprecated in "
"Twisted 15.0.0; please use twisted.internet.defer.inlineCallbacks "
"instead", DeprecationWarning, stacklevel=2)
if not isinstance(d, Deferred):
raise TypeError("You must give waitForDeferred a Deferred. You gave it %r." % (d,))
self.d = d
def getResult(self):
if isinstance(self.result, failure.Failure):
self.result.raiseException()
return self.result
def _deferGenerator(g, deferred):
"""
See L{deferredGenerator}.
"""
result = None
# This function is complicated by the need to prevent unbounded recursion
# arising from repeatedly yielding immediately ready deferreds. This while
# loop and the waiting variable solve that by manually unfolding the
# recursion.
waiting = [True, # defgen is waiting for result?
None] # result
while 1:
try:
result = next(g)
except StopIteration:
deferred.callback(result)
return deferred
except:
deferred.errback()
return deferred
# Deferred.callback(Deferred) raises an error; we catch this case
# early here and give a nicer error message to the user in case
# they yield a Deferred.
if isinstance(result, Deferred):
return fail(TypeError("Yield waitForDeferred(d), not d!"))
if isinstance(result, waitForDeferred):
# a waitForDeferred was yielded, get the result.
# Pass result in so it doesn't get changed going around the loop
# This isn't a problem for waiting, as it's only reused if
# gotResult has already been executed.
def gotResult(r, result=result):
result.result = r
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
_deferGenerator(g, deferred)
result.d.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
return deferred
# Reset waiting to initial values for next loop
waiting[0] = True
waiting[1] = None
result = None
@deprecated(Version('Twisted', 15, 0, 0),
"twisted.internet.defer.inlineCallbacks")
def deferredGenerator(f):
"""
L{deferredGenerator} and L{waitForDeferred} help you write
L{Deferred}-using code that looks like a regular sequential function.
Consider the use of L{inlineCallbacks} instead, which can accomplish
the same thing in a more concise manner.
There are two important functions involved: L{waitForDeferred}, and
L{deferredGenerator}. They are used together, like this::
@deferredGenerator
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
print(thing) #the result! hoorj!
L{waitForDeferred} returns something that you should immediately yield; when
your generator is resumed, calling C{thing.getResult()} will either give you
the result of the L{Deferred} if it was a success, or raise an exception if it
was a failure. Calling C{getResult} is B{absolutely mandatory}. If you do
not call it, I{your program will not work}.
L{deferredGenerator} takes one of these waitForDeferred-using generator
functions and converts it into a function that returns a L{Deferred}. The
result of the L{Deferred} will be the last value that your generator yielded
unless the last value is a L{waitForDeferred} instance, in which case the
result will be L{None}. If the function raises an unhandled exception, the
L{Deferred} will errback instead. Remember that C{return result} won't work;
use C{yield result; return} in place of that.
Note that not yielding anything from your generator will make the L{Deferred}
result in L{None}. Yielding a L{Deferred} from your generator is also an error
condition; always yield C{waitForDeferred(d)} instead.
The L{Deferred} returned from your deferred generator may also errback if your
generator raised an exception. For example::
@deferredGenerator
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
if thing == 'I love Twisted':
# will become the result of the Deferred
yield 'TWISTED IS GREAT!'
return
else:
# will trigger an errback
raise Exception('DESTROY ALL LIFE')
Put succinctly, these functions connect deferred-using code with this 'fake
blocking' style in both directions: L{waitForDeferred} converts from a
L{Deferred} to the 'blocking' style, and L{deferredGenerator} converts from the
'blocking' style to a L{Deferred}.
"""
@wraps(f)
def unwindGenerator(*args, **kwargs):
return _deferGenerator(f(*args, **kwargs), Deferred())
return unwindGenerator
## inlineCallbacks
class _DefGen_Return(BaseException):
def __init__(self, value):
self.value = value
def returnValue(val):
"""
Return val from a L{inlineCallbacks} generator.
Note: this is currently implemented by raising an exception
derived from L{BaseException}. You might want to change any
'except:' clauses to an 'except Exception:' clause so as not to
catch this exception.
Also: while this function currently will work when called from
within arbitrary functions called from within the generator, do
not rely upon this behavior.
"""
raise _DefGen_Return(val)
@attr.s
class _CancellationStatus(object):
"""
Cancellation status of an L{inlineCallbacks} invocation.
@ivar waitingOn: the L{Deferred} being waited upon (which
L{_inlineCallbacks} must fill out before returning)
@ivar deferred: the L{Deferred} to callback or errback when the generator
invocation has finished.
"""
deferred = attr.ib()
waitingOn = attr.ib(default=None)
@failure._extraneous
def _inlineCallbacks(result, g, status):
"""
Carry out the work of L{inlineCallbacks}.
Iterate the generator produced by an C{@}L{inlineCallbacks}-decorated
function, C{g}, C{send()}ing it the results of each value C{yield}ed by
that generator, until a L{Deferred} is yielded, at which point a callback
is added to that L{Deferred} to call this function again.
@param result: The last result seen by this generator. Note that this is
never a L{Deferred} - by the time this function is invoked, the
L{Deferred} has been called back and this will be a particular result
at a point in its callback chain.
@param g: a generator object returned by calling a function or method
decorated with C{@}L{inlineCallbacks}
@param status: a L{_CancellationStatus} tracking the current status of C{g}
"""
# This function is complicated by the need to prevent unbounded recursion
# arising from repeatedly yielding immediately ready deferreds. This while
# loop and the waiting variable solve that by manually unfolding the
# recursion.
waiting = [True, # waiting for result?
None] # result
while 1:
try:
# Send the last result back as the result of the yield expression.
isFailure = isinstance(result, failure.Failure)
if isFailure:
result = result.throwExceptionIntoGenerator(g)
else:
result = g.send(result)
except StopIteration as e:
# fell off the end, or "return" statement
status.deferred.callback(getattr(e, "value", None))
return
except _DefGen_Return as e:
# returnValue() was called; time to give a result to the original
# Deferred. First though, let's try to identify the potentially
# confusing situation which results when returnValue() is
# accidentally invoked from a different function, one that wasn't
# decorated with @inlineCallbacks.
# The traceback starts in this frame (the one for
# _inlineCallbacks); the next one down should be the application
# code.
appCodeTrace = exc_info()[2].tb_next
if isFailure:
# If we invoked this generator frame by throwing an exception
# into it, then throwExceptionIntoGenerator will consume an
# additional stack frame itself, so we need to skip that too.
appCodeTrace = appCodeTrace.tb_next
# Now that we've identified the frame being exited by the
# exception, let's figure out if returnValue was called from it
# directly. returnValue itself consumes a stack frame, so the
# application code will have a tb_next, but it will *not* have a
# second tb_next.
if appCodeTrace.tb_next.tb_next:
# If returnValue was invoked non-local to the frame which it is
# exiting, identify the frame that ultimately invoked
# returnValue so that we can warn the user, as this behavior is
# confusing.
ultimateTrace = appCodeTrace
while ultimateTrace.tb_next.tb_next:
ultimateTrace = ultimateTrace.tb_next
filename = ultimateTrace.tb_frame.f_code.co_filename
lineno = ultimateTrace.tb_lineno
warnings.warn_explicit(
"returnValue() in %r causing %r to exit: "
"returnValue should only be invoked by functions decorated "
"with inlineCallbacks" % (
ultimateTrace.tb_frame.f_code.co_name,
appCodeTrace.tb_frame.f_code.co_name),
DeprecationWarning, filename, lineno)
status.deferred.callback(e.value)
return
except:
status.deferred.errback()
return
if isinstance(result, Deferred):
# a deferred was yielded, get the result.
def gotResult(r):
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
# We are not waiting for deferred result any more
_inlineCallbacks(r, g, status)
result.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
status.waitingOn = result
return
result = waiting[1]
# Reset waiting to initial values for next loop. gotResult uses
# waiting, but this isn't a problem because gotResult is only
# executed once, and if it hasn't been executed yet, the return
# branch above would have been taken.
waiting[0] = True
waiting[1] = None
def _cancellableInlineCallbacks(g):
"""
Make an C{@}L{inlineCallbacks} cancellable.
@param g: a generator object returned by calling a function or method
decorated with C{@}L{inlineCallbacks}
@return: L{Deferred} for the C{@}L{inlineCallbacks} that is cancellable.
"""
def cancel(it):
it.callbacks, tmp = [], it.callbacks
it.addErrback(handleCancel)
it.callbacks.extend(tmp)
it.errback(_InternalInlineCallbacksCancelledError())
deferred = Deferred(cancel)
status = _CancellationStatus(deferred)
def handleCancel(result):
"""
Propagate the cancellation of an C{@}L{inlineCallbacks} to the
L{Deferred} it is waiting on.
@param result: An L{_InternalInlineCallbacksCancelledError} from
C{cancel()}.
@return: A new L{Deferred} that the C{@}L{inlineCallbacks} generator
can callback or errback through.
"""
result.trap(_InternalInlineCallbacksCancelledError)
status.deferred = Deferred(cancel)
# We would only end up here if the inlineCallback is waiting on
# another Deferred. It needs to be cancelled.
awaited = status.waitingOn
awaited.cancel()
return status.deferred
_inlineCallbacks(None, g, status)
return deferred
class _InternalInlineCallbacksCancelledError(Exception):
"""
A unique exception used only in L{_cancellableInlineCallbacks} to verify
that an L{inlineCallbacks} is being cancelled as expected.
"""
def inlineCallbacks(f):
"""
L{inlineCallbacks} helps you write L{Deferred}-using code that looks like a
regular sequential function. For example::
@inlineCallbacks
def thingummy():
thing = yield makeSomeRequestResultingInDeferred()
print(thing) # the result! hoorj!
When you call anything that results in a L{Deferred}, you can simply yield it;
your generator will automatically be resumed when the Deferred's result is
available. The generator will be sent the result of the L{Deferred} with the
'send' method on generators, or if the result was a failure, 'throw'.
Things that are not L{Deferred}s may also be yielded, and your generator
will be resumed with the same object sent back. This means C{yield}
performs an operation roughly equivalent to L{maybeDeferred}.
Your inlineCallbacks-enabled generator will return a L{Deferred} object, which
will result in the return value of the generator (or will fail with a
failure object if your generator raises an unhandled exception). Note that
you can't use C{return result} to return a value; use C{returnValue(result)}
instead. Falling off the end of the generator, or simply using C{return}
will cause the L{Deferred} to have a result of L{None}.
Be aware that L{returnValue} will not accept a L{Deferred} as a parameter.
If you believe the thing you'd like to return could be a L{Deferred}, do
this::
result = yield result
returnValue(result)
The L{Deferred} returned from your deferred generator may errback if your
generator raised an exception::
@inlineCallbacks
def thingummy():
thing = yield makeSomeRequestResultingInDeferred()
if thing == 'I love Twisted':
# will become the result of the Deferred
returnValue('TWISTED IS GREAT!')
else:
# will trigger an errback
raise Exception('DESTROY ALL LIFE')
It is possible to use the C{return} statement instead of L{returnValue}::
@inlineCallbacks
def loadData(url):
response = yield makeRequest(url)
return json.loads(response)
You can cancel the L{Deferred} returned from your L{inlineCallbacks}
generator before it is fired by your generator completing (either by
reaching its end, a C{return} statement, or by calling L{returnValue}).
A C{CancelledError} will be raised from the C{yield}ed L{Deferred} that
has been cancelled if that C{Deferred} does not otherwise suppress it.
"""
@wraps(f)
def unwindGenerator(*args, **kwargs):
try:
gen = f(*args, **kwargs)
except _DefGen_Return:
raise TypeError(
"inlineCallbacks requires %r to produce a generator; instead"
"caught returnValue being used in a non-generator" % (f,))
if not isinstance(gen, types.GeneratorType):
raise TypeError(
"inlineCallbacks requires %r to produce a generator; "
"instead got %r" % (f, gen))
return _cancellableInlineCallbacks(gen)
return unwindGenerator
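# Illustrative sketch (editorial addition): an inlineCallbacks-decorated
# generator that waits on a Deferred with ``yield`` and finishes with
# returnValue, both defined above.
@inlineCallbacks
def _example_inline_callbacks(source):
    # ``yield`` suspends the generator until ``source`` has a result (or
    # raises the wrapped exception if it fails).
    value = yield source
    returnValue(value * 2)

def _example_drive_inline_callbacks():
    source = Deferred()
    result = _example_inline_callbacks(source)   # a Deferred for the generator
    result.addCallback(print)                    # prints 6 once source fires
    source.callback(3)
    return result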
## DeferredLock/DeferredQueue
class _ConcurrencyPrimitive(object):
def __init__(self):
self.waiting = []
def _releaseAndReturn(self, r):
self.release()
return r
def run(*args, **kwargs):
"""
Acquire, run, release.
This function takes a callable as its first argument and any
number of other positional and keyword arguments. When the
lock or semaphore is acquired, the callable will be invoked
with those arguments.
The callable may return a L{Deferred}; if it does, the lock or
semaphore won't be released until that L{Deferred} fires.
@return: L{Deferred} of function result.
"""
if len(args) < 2:
if not args:
raise TypeError("run() takes at least 2 arguments, none given.")
raise TypeError("%s.run() takes at least 2 arguments, 1 given" % (
args[0].__class__.__name__,))
self, f = args[:2]
args = args[2:]
def execute(ignoredResult):
d = maybeDeferred(f, *args, **kwargs)
d.addBoth(self._releaseAndReturn)
return d
d = self.acquire()
d.addCallback(execute)
return d
class DeferredLock(_ConcurrencyPrimitive):
"""
A lock for event driven systems.
@ivar locked: C{True} when this Lock has been acquired, false at all other
times. Do not change this value, but it is useful to examine for the
equivalent of a "non-blocking" acquisition.
"""
locked = False
def _cancelAcquire(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. release() pops a deferred out of self.waiting and
calls it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def acquire(self):
"""
Attempt to acquire the lock. Returns a L{Deferred} that fires on
lock acquisition with the L{DeferredLock} as the value. If the lock
is locked, then the Deferred is placed at the end of a waiting list.
@return: a L{Deferred} which fires on lock acquisition.
@rtype: a L{Deferred}
"""
d = Deferred(canceller=self._cancelAcquire)
if self.locked:
self.waiting.append(d)
else:
self.locked = True
d.callback(self)
return d
def release(self):
"""
Release the lock. If there is a waiting list, then the first
L{Deferred} in that waiting list will be called back.
Should be called by whoever did the L{acquire}() when the shared
resource is free.
"""
assert self.locked, "Tried to release an unlocked lock"
self.locked = False
if self.waiting:
# someone is waiting to acquire lock
self.locked = True
d = self.waiting.pop(0)
d.callback(self)
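# Illustrative sketch (editorial addition): serialising access to a shared
# resource with DeferredLock.run(), which acquires the lock, invokes the
# callable, and releases once the callable's (possibly Deferred) result fires.
def _example_deferred_lock():
    lock = DeferredLock()
    log = []

    def critical_section(tag):
        # Only one invocation at a time ever runs in here.
        log.append(tag)
        return tag

    d1 = lock.run(critical_section, 'first')
    d2 = lock.run(critical_section, 'second')   # queued until d1 releases
    return d1, d2, log                          # log == ['first', 'second']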
class DeferredSemaphore(_ConcurrencyPrimitive):
"""
A semaphore for event driven systems.
If you are looking into this as a means of limiting parallelism, you might
find L{twisted.internet.task.Cooperator} more useful.
@ivar limit: At most this many users may acquire this semaphore at
once.
@type limit: L{int}
@ivar tokens: The difference between C{limit} and the number of users
which have currently acquired this semaphore.
@type tokens: L{int}
"""
def __init__(self, tokens):
"""
@param tokens: initial value of L{tokens} and L{limit}
@type tokens: L{int}
"""
_ConcurrencyPrimitive.__init__(self)
if tokens < 1:
raise ValueError("DeferredSemaphore requires tokens >= 1")
self.tokens = tokens
self.limit = tokens
def _cancelAcquire(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. release() pops a deferred out of self.waiting and
calls it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def acquire(self):
"""
Attempt to acquire the token.
@return: a L{Deferred} which fires on token acquisition.
"""
assert self.tokens >= 0, "Internal inconsistency?? tokens should never be negative"
d = Deferred(canceller=self._cancelAcquire)
if not self.tokens:
self.waiting.append(d)
else:
self.tokens = self.tokens - 1
d.callback(self)
return d
def release(self):
"""
Release the token.
Should be called by whoever did the L{acquire}() when the shared
resource is free.
"""
assert self.tokens < self.limit, "Someone released me too many times: too many tokens!"
self.tokens = self.tokens + 1
if self.waiting:
# someone is waiting to acquire token
self.tokens = self.tokens - 1
d = self.waiting.pop(0)
d.callback(self)
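# Illustrative sketch (editorial addition): bounding concurrency with
# DeferredSemaphore. ``fetch`` is a hypothetical callable returning a
# Deferred; at most two calls hold the semaphore at once, the rest wait in
# FIFO order.
def _example_deferred_semaphore(urls, fetch):
    sem = DeferredSemaphore(tokens=2)
    return gatherResults([sem.run(fetch, url) for url in urls])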
class QueueOverflow(Exception):
pass
class QueueUnderflow(Exception):
pass
class DeferredQueue(object):
"""
An event driven queue.
Objects may be added as usual to this queue. When an attempt is
made to retrieve an object when the queue is empty, a L{Deferred} is
returned which will fire when an object becomes available.
@ivar size: The maximum number of objects to allow into the queue
at a time. When an attempt to add a new object would exceed this
limit, L{QueueOverflow} is raised synchronously. L{None} for no limit.
@ivar backlog: The maximum number of L{Deferred} gets to allow at
one time. When an attempt is made to get an object which would
exceed this limit, L{QueueUnderflow} is raised synchronously. L{None}
for no limit.
"""
def __init__(self, size=None, backlog=None):
self.waiting = []
self.pending = []
self.size = size
self.backlog = backlog
def _cancelGet(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. put() pops a deferred out of self.waiting and calls
it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def put(self, obj):
"""
Add an object to this queue.
@raise QueueOverflow: Too many objects are in this queue.
"""
if self.waiting:
self.waiting.pop(0).callback(obj)
elif self.size is None or len(self.pending) < self.size:
self.pending.append(obj)
else:
raise QueueOverflow()
def get(self):
"""
Attempt to retrieve and remove an object from the queue.
@return: a L{Deferred} which fires with the next object available in
the queue.
@raise QueueUnderflow: Too many (more than C{backlog})
L{Deferred}s are already waiting for an object from this queue.
"""
if self.pending:
return succeed(self.pending.pop(0))
elif self.backlog is None or len(self.waiting) < self.backlog:
d = Deferred(canceller=self._cancelGet)
self.waiting.append(d)
return d
else:
raise QueueUnderflow()
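# Illustrative sketch (editorial addition): a producer/consumer hand-off with
# DeferredQueue. A get() issued while the queue is empty returns a Deferred
# that fires as soon as an object is put().
def _example_deferred_queue():
    queue = DeferredQueue()
    waiter = queue.get()           # nothing pending yet: an unfired Deferred
    waiter.addCallback(print)      # prints 'job' once put() is called
    queue.put('job')
    return queue.get()             # empty again: another unfired Deferred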
class AlreadyTryingToLockError(Exception):
"""
Raised when L{DeferredFilesystemLock.deferUntilLocked} is called twice on a
single L{DeferredFilesystemLock}.
"""
class DeferredFilesystemLock(lockfile.FilesystemLock):
"""
A L{FilesystemLock} that allows for a L{Deferred} to be fired when the lock is
acquired.
@ivar _scheduler: The object in charge of scheduling retries. In this
implementation this is parameterized for testing.
@ivar _interval: The retry interval for an L{IReactorTime} based scheduler.
@ivar _tryLockCall: A L{DelayedCall} based on C{_interval} that will manage
the next retry for acquiring the lock.
@ivar _timeoutCall: A L{DelayedCall} based on C{deferUntilLocked}'s timeout
argument. This is in charge of timing out our attempt to acquire the
lock.
"""
_interval = 1
_tryLockCall = None
_timeoutCall = None
def __init__(self, name, scheduler=None):
"""
@param name: The name of the lock to acquire
@param scheduler: An object which provides L{IReactorTime}
"""
lockfile.FilesystemLock.__init__(self, name)
if scheduler is None:
from twisted.internet import reactor
scheduler = reactor
self._scheduler = scheduler
def deferUntilLocked(self, timeout=None):
"""
Wait until we acquire this lock. This method is not safe for
concurrent use.
@type timeout: L{float} or L{int}
@param timeout: the number of seconds after which to time out if the
lock has not been acquired.
@return: a L{Deferred} which will callback when the lock is acquired, or
errback with a L{TimeoutError} after timing out or an
L{AlreadyTryingToLockError} if the L{deferUntilLocked} has already
been called and not successfully locked the file.
"""
if self._tryLockCall is not None:
return fail(
AlreadyTryingToLockError(
"deferUntilLocked isn't safe for concurrent use."))
def _cancelLock(reason):
"""
Cancel a L{DeferredFilesystemLock.deferUntilLocked} call.
@type reason: L{failure.Failure}
@param reason: The reason why the call is cancelled.
"""
self._tryLockCall.cancel()
self._tryLockCall = None
if self._timeoutCall is not None and self._timeoutCall.active():
self._timeoutCall.cancel()
self._timeoutCall = None
if self.lock():
d.callback(None)
else:
d.errback(reason)
d = Deferred(lambda deferred: _cancelLock(CancelledError()))
def _tryLock():
if self.lock():
if self._timeoutCall is not None:
self._timeoutCall.cancel()
self._timeoutCall = None
self._tryLockCall = None
d.callback(None)
else:
if timeout is not None and self._timeoutCall is None:
reason = failure.Failure(TimeoutError(
"Timed out acquiring lock: %s after %fs" % (
self.name,
timeout)))
self._timeoutCall = self._scheduler.callLater(
timeout, _cancelLock, reason)
self._tryLockCall = self._scheduler.callLater(
self._interval, _tryLock)
_tryLock()
return d
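# Illustrative sketch (editorial addition): waiting on a filesystem lock with
# a timeout. The lock name is a hypothetical path and a running reactor is
# assumed, since retries and the timeout are scheduled on it.
def _example_filesystem_lock():
    lock = DeferredFilesystemLock('example.lock')
    d = lock.deferUntilLocked(timeout=10)
    d.addCallback(lambda ignored: print('lock acquired'))
    d.addErrback(lambda f: print('gave up:', f.value))
    return d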
__all__ = ["Deferred", "DeferredList", "succeed", "fail", "FAILURE", "SUCCESS",
"AlreadyCalledError", "TimeoutError", "gatherResults",
"maybeDeferred", "ensureDeferred",
"waitForDeferred", "deferredGenerator", "inlineCallbacks",
"returnValue",
"DeferredLock", "DeferredSemaphore", "DeferredQueue",
"DeferredFilesystemLock", "AlreadyTryingToLockError",
"CancelledError",
]
| 35.49003
| 117
| 0.618488
|
9ff323afb0b779aa0a34f553d23945e6bd503f51
| 2,668
|
py
|
Python
|
superbench/cli/_commands.py
|
asathiya007/superbenchmark
|
0583862d2dde37a9aaf16f71be3a23710effef99
|
[
"MIT"
] | 1
|
2021-07-06T01:41:15.000Z
|
2021-07-06T01:41:15.000Z
|
superbench/cli/_commands.py
|
asathiya007/superbenchmark
|
0583862d2dde37a9aaf16f71be3a23710effef99
|
[
"MIT"
] | 3
|
2021-09-20T22:37:13.000Z
|
2022-02-19T07:45:15.000Z
|
superbench/cli/_commands.py
|
Nort85/superbenchmark
|
702fb1eb3713b2a8c8d9070f7819bba7bab39be4
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench CLI commands."""
from knack.arguments import ArgumentsContext
from knack.commands import CLICommandsLoader, CommandGroup
class SuperBenchCommandsLoader(CLICommandsLoader):
"""SuperBench CLI commands loader."""
def load_command_table(self, args):
"""Load commands into the command table.
Args:
args (list): List of arguments from the command line.
Returns:
collections.OrderedDict: The command table with the loaded commands.
"""
with CommandGroup(self, '', 'superbench.cli._handler#{}') as g:
g.command('version', 'version_command_handler')
g.command('deploy', 'deploy_command_handler')
g.command('exec', 'exec_command_handler')
g.command('run', 'run_command_handler')
return super().load_command_table(args)
def load_arguments(self, command):
"""Load arguments for commands.
Args:
command: The command to load arguments for.
"""
with ArgumentsContext(self, '') as ac:
ac.argument('docker_image', options_list=('--docker-image', '-i'), type=str, help='Docker image URI.')
ac.argument('docker_username', type=str, help='Docker registry username if authentication is needed.')
ac.argument('docker_password', type=str, help='Docker registry password if authentication is needed.')
ac.argument(
'host_file', options_list=('--host-file', '-f'), type=str, help='Path to Ansible inventory host file.'
)
ac.argument('host_list', options_list=('--host-list', '-l'), type=str, help='Comma separated host list.')
ac.argument('host_username', type=str, help='Host username if needed.')
ac.argument('host_password', type=str, help='Host password or key passphrase if needed.')
ac.argument(
'output_dir',
type=str,
help='Path to output directory, outputs/{datetime} will be used if not specified.'
)
ac.argument('private_key', type=str, help='Path to private key if needed.')
ac.argument(
'config_file', options_list=('--config-file', '-c'), type=str, help='Path to SuperBench config file.'
)
ac.argument(
'config_override',
options_list=('--config-override', '-C'),
type=str,
nargs='+',
help='Extra arguments to override config_file.'
)
super().load_arguments(command)
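# Illustrative sketch (editorial addition, assumptions marked): how a
# knack-based entry point would typically wire in this commands loader. The
# CLI name and the invoked command here are hypothetical.
from knack import CLI

def _example_main():
    cli = CLI(cli_name='sb', commands_loader_cls=SuperBenchCommandsLoader)
    return cli.invoke(['version'])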
| 43.737705
| 118
| 0.602324
|
8364f6c8a95cf49dddd678773cb99e794bc3703e
| 5,551
|
py
|
Python
|
groupdocs_parser_cloud/models/rectangle.py
|
groupdocs-parser-cloud/groupdocs-parser-cloud-python
|
e306362857cb78b17a5dc73a3bf707cbc6876ca3
|
[
"MIT"
] | 1
|
2021-12-20T20:27:12.000Z
|
2021-12-20T20:27:12.000Z
|
groupdocs_parser_cloud/models/rectangle.py
|
groupdocs-parser-cloud/groupdocs-parser-cloud-python
|
e306362857cb78b17a5dc73a3bf707cbc6876ca3
|
[
"MIT"
] | null | null | null |
groupdocs_parser_cloud/models/rectangle.py
|
groupdocs-parser-cloud/groupdocs-parser-cloud-python
|
e306362857cb78b17a5dc73a3bf707cbc6876ca3
|
[
"MIT"
] | 1
|
2021-09-05T17:46:05.000Z
|
2021-09-05T17:46:05.000Z
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="Rectangle.py">
# Copyright (c) 2003-2019 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class Rectangle(object):
"""
Rectangular area on the page.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'position': 'Point',
'size': 'Size',
'coordinates': 'Coordinates'
}
attribute_map = {
'position': 'Position',
'size': 'Size',
'coordinates': 'Coordinates'
}
def __init__(self, position=None, size=None, coordinates=None, **kwargs): # noqa: E501
"""Initializes new instance of Rectangle""" # noqa: E501
self._position = None
self._size = None
self._coordinates = None
if position is not None:
self.position = position
if size is not None:
self.size = size
if coordinates is not None:
self.coordinates = coordinates
@property
def position(self):
"""
Gets the position. # noqa: E501
Gets the coordinates of the upper-left corner of the rectangular area. # noqa: E501
:return: The position. # noqa: E501
:rtype: Point
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position.
Gets the coordinates of the upper-left corner of the rectangular area. # noqa: E501
:param position: The position. # noqa: E501
:type: Point
"""
self._position = position
@property
def size(self):
"""
Gets the size. # noqa: E501
Gets or sets the size of the rectangle. # noqa: E501
:return: The size. # noqa: E501
:rtype: Size
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size.
Gets or sets the size of the rectangle. # noqa: E501
:param size: The size. # noqa: E501
:type: Size
"""
self._size = size
@property
def coordinates(self):
"""
Gets the coordinates. # noqa: E501
Gets or sets the coordinates. # noqa: E501
:return: The coordinates. # noqa: E501
:rtype: Coordinates
"""
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates):
"""
Sets the coordinates.
Gets or sets the coordinates. # noqa: E501
:param coordinates: The coordinates. # noqa: E501
:type: Coordinates
"""
self._coordinates = coordinates
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Rectangle):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
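# Illustrative sketch (editorial addition): constructing the generated model
# and serialising it. With no arguments every attribute defaults to None; the
# setters defined above can then be used to populate it.
def _example_rectangle():
    rect = Rectangle()
    # {'position': None, 'size': None, 'coordinates': None}
    return rect.to_dict()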
| 30.005405
| 92
| 0.567645
|
21491f53c69a99e2931bc07dc1fe385427e6bfe1
| 6,985
|
py
|
Python
|
tracformatter/trac/wiki/interwiki.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | 1
|
2017-02-09T15:55:16.000Z
|
2017-02-09T15:55:16.000Z
|
tracformatter/trac/wiki/interwiki.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | null | null | null |
tracformatter/trac/wiki/interwiki.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <cboos@edgewall.org>
import re
from genshi.builder import tag
from trac.cache import cached
from trac.config import ConfigSection
from trac.core import *
from trac.util.translation import _, N_
from trac.wiki.api import IWikiChangeListener, IWikiMacroProvider
from trac.wiki.parser import WikiParser
from trac.wiki.formatter import split_url_into_path_query_fragment
class InterWikiMap(Component):
"""InterWiki map manager."""
implements(IWikiChangeListener, IWikiMacroProvider)
interwiki_section = ConfigSection('interwiki',
"""Every option in the `[interwiki]` section defines one InterWiki
prefix. The option name defines the prefix. The option value defines
the URL, optionally followed by a description separated from the URL
by whitespace. Parametric URLs are supported as well.
'''Example:'''
{{{
[interwiki]
MeatBall = http://www.usemod.com/cgi-bin/mb.pl?
PEP = http://www.python.org/peps/pep-$1.html Python Enhancement Proposal $1
tsvn = tsvn: Interact with TortoiseSvn
}}}
""")
_page_name = 'InterMapTxt'
_interwiki_re = re.compile(r"(%s)[ \t]+([^ \t]+)(?:[ \t]+#(.*))?" %
WikiParser.LINK_SCHEME, re.UNICODE)
_argspec_re = re.compile(r"\$\d")
# The component itself behaves as a read-only map
def __contains__(self, ns):
return ns.upper() in self.interwiki_map
def __getitem__(self, ns):
return self.interwiki_map[ns.upper()]
def keys(self):
return self.interwiki_map.keys()
# Expansion of positional arguments ($1, $2, ...) in URL and title
def _expand(self, txt, args):
"""Replace "$1" by the first args, "$2" by the second, etc."""
def setarg(match):
num = int(match.group()[1:])
return args[num - 1] if 0 < num <= len(args) else ''
return re.sub(InterWikiMap._argspec_re, setarg, txt)
def _expand_or_append(self, txt, args):
"""Like expand, but also append first arg if there's no "$"."""
if not args:
return txt
expanded = self._expand(txt, args)
return txt + args[0] if expanded == txt else expanded
def url(self, ns, target):
"""Return `(url, title)` for the given InterWiki `ns`.
Expand the colon-separated `target` arguments.
"""
ns, url, title = self[ns]
maxargnum = max([0] + [int(a[1:]) for a in
re.findall(InterWikiMap._argspec_re, url)])
target, query, fragment = split_url_into_path_query_fragment(target)
if maxargnum > 0:
args = target.split(':', (maxargnum - 1))
else:
args = [target]
url = self._expand_or_append(url, args)
ntarget, nquery, nfragment = split_url_into_path_query_fragment(url)
if query and nquery:
nquery = '%s&%s' % (nquery, query[1:])
else:
nquery = nquery or query
nfragment = fragment or nfragment # user provided takes precedence
expanded_url = ntarget + nquery + nfragment
expanded_title = self._expand(title, args)
if expanded_title == title:
expanded_title = _("%(target)s in %(name)s",
target=target, name=title)
return expanded_url, expanded_title
# IWikiChangeListener methods
def wiki_page_added(self, page):
if page.name == InterWikiMap._page_name:
del self.interwiki_map
def wiki_page_changed(self, page, version, t, comment, author, ipnr):
if page.name == InterWikiMap._page_name:
del self.interwiki_map
def wiki_page_deleted(self, page):
if page.name == InterWikiMap._page_name:
del self.interwiki_map
def wiki_page_version_deleted(self, page):
if page.name == InterWikiMap._page_name:
del self.interwiki_map
@cached
def interwiki_map(self, db):
"""Map from upper-cased namespaces to (namespace, prefix, title)
values.
"""
from trac.wiki.model import WikiPage
map = {}
content = WikiPage(self.env, InterWikiMap._page_name, db=db).text
in_map = False
for line in content.split('\n'):
if in_map:
if line.startswith('----'):
in_map = False
else:
m = re.match(InterWikiMap._interwiki_re, line)
if m:
prefix, url, title = m.groups()
url = url.strip()
title = title.strip() if title else prefix
map[prefix.upper()] = (prefix, url, title)
elif line.startswith('----'):
in_map = True
for prefix, value in self.interwiki_section.options():
value = value.split(None, 1)
if value:
url = value[0].strip()
title = value[1].strip() if len(value) > 1 else prefix
map[prefix.upper()] = (prefix, url, title)
return map
# IWikiMacroProvider methods
def get_macros(self):
yield 'InterWiki'
def get_macro_description(self, name):
return 'messages', \
N_("Provide a description list for the known InterWiki "
"prefixes.")
def expand_macro(self, formatter, name, content):
interwikis = []
for k in sorted(self.keys()):
prefix, url, title = self[k]
interwikis.append({
'prefix': prefix, 'url': url, 'title': title,
'rc_url': self._expand_or_append(url, ['RecentChanges']),
'description': url if title == prefix else title})
return tag.table(tag.tr(tag.th(tag.em("Prefix")),
tag.th(tag.em("Site"))),
[tag.tr(tag.td(tag.a(w['prefix'], href=w['rc_url'])),
tag.td(tag.a(w['description'],
href=w['url'])))
for w in interwikis ],
class_="wiki interwiki")
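# Illustrative sketch (editorial addition): the positional-argument expansion
# performed by _expand above, shown standalone. "$1" is replaced by the first
# colon-separated argument, "$2" by the second, and so on.
def _example_expand(url, args):
    def setarg(match):
        num = int(match.group()[1:])
        return args[num - 1] if 0 < num <= len(args) else ''
    return re.sub(r"\$\d", setarg, url)

# _example_expand("http://www.python.org/peps/pep-$1.html", ["0008"])
# returns "http://www.python.org/peps/pep-0008.html"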
| 38.379121
| 84
| 0.56879
|
49a4896a9a68ff446b6343d1474c9634ee0b9ba4
| 8,838
|
py
|
Python
|
python/swagger_client/models/customer_payload.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
python/swagger_client/models/customer_payload.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
python/swagger_client/models/customer_payload.py
|
alextselegidis/easyappointments-sdk
|
8ba969dc1221ea614b70d4d52313f20fc85df1e1
|
[
"CC-BY-3.0"
] | null | null | null |
# coding: utf-8
"""
Easy!Appointments API
These are the OpenAPI specs that describe the REST API of Easy!Appointments. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: info@easyappointments.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomerPayload(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'first_name': 'str',
'last_name': 'str',
'email': 'str',
'phone': 'str',
'address': 'str',
'city': 'str',
'zip': 'str',
'timezone': 'str',
'language': 'str',
'notes': 'str'
}
attribute_map = {
'first_name': 'firstName',
'last_name': 'lastName',
'email': 'email',
'phone': 'phone',
'address': 'address',
'city': 'city',
'zip': 'zip',
'timezone': 'timezone',
'language': 'language',
'notes': 'notes'
}
def __init__(self, first_name=None, last_name=None, email=None, phone=None, address=None, city=None, zip=None, timezone=None, language=None, notes=None): # noqa: E501
"""CustomerPayload - a model defined in Swagger""" # noqa: E501
self._first_name = None
self._last_name = None
self._email = None
self._phone = None
self._address = None
self._city = None
self._zip = None
self._timezone = None
self._language = None
self._notes = None
self.discriminator = None
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if email is not None:
self.email = email
if phone is not None:
self.phone = phone
if address is not None:
self.address = address
if city is not None:
self.city = city
if zip is not None:
self.zip = zip
if timezone is not None:
self.timezone = timezone
if language is not None:
self.language = language
if notes is not None:
self.notes = notes
@property
def first_name(self):
"""Gets the first_name of this CustomerPayload. # noqa: E501
:return: The first_name of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this CustomerPayload.
:param first_name: The first_name of this CustomerPayload. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this CustomerPayload. # noqa: E501
:return: The last_name of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this CustomerPayload.
:param last_name: The last_name of this CustomerPayload. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def email(self):
"""Gets the email of this CustomerPayload. # noqa: E501
:return: The email of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this CustomerPayload.
:param email: The email of this CustomerPayload. # noqa: E501
:type: str
"""
self._email = email
@property
def phone(self):
"""Gets the phone of this CustomerPayload. # noqa: E501
:return: The phone of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this CustomerPayload.
:param phone: The phone of this CustomerPayload. # noqa: E501
:type: str
"""
self._phone = phone
@property
def address(self):
"""Gets the address of this CustomerPayload. # noqa: E501
:return: The address of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this CustomerPayload.
:param address: The address of this CustomerPayload. # noqa: E501
:type: str
"""
self._address = address
@property
def city(self):
"""Gets the city of this CustomerPayload. # noqa: E501
:return: The city of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this CustomerPayload.
:param city: The city of this CustomerPayload. # noqa: E501
:type: str
"""
self._city = city
@property
def zip(self):
"""Gets the zip of this CustomerPayload. # noqa: E501
:return: The zip of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._zip
@zip.setter
def zip(self, zip):
"""Sets the zip of this CustomerPayload.
:param zip: The zip of this CustomerPayload. # noqa: E501
:type: str
"""
self._zip = zip
@property
def timezone(self):
"""Gets the timezone of this CustomerPayload. # noqa: E501
:return: The timezone of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
"""Sets the timezone of this CustomerPayload.
:param timezone: The timezone of this CustomerPayload. # noqa: E501
:type: str
"""
self._timezone = timezone
@property
def language(self):
"""Gets the language of this CustomerPayload. # noqa: E501
:return: The language of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this CustomerPayload.
:param language: The language of this CustomerPayload. # noqa: E501
:type: str
"""
self._language = language
@property
def notes(self):
"""Gets the notes of this CustomerPayload. # noqa: E501
:return: The notes of this CustomerPayload. # noqa: E501
:rtype: str
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this CustomerPayload.
:param notes: The notes of this CustomerPayload. # noqa: E501
:type: str
"""
self._notes = notes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CustomerPayload, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CustomerPayload):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
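# A minimal usage sketch (not generated code); the field values below are
# hypothetical examples.
def _example_payload():
    payload = CustomerPayload(first_name='Jane', last_name='Doe',
                              email='jane.doe@example.org', phone='+1-555-0100')
    return payload.to_dict()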
| 25.617391
| 171
| 0.559742
|
8245747b892971c64a79edbb4542d4b21ccfe855
| 22,775
|
py
|
Python
|
training/training_loop.py
|
igg002/stylegan2-ada-pytorch
|
f9e58945e599ce09d3a21bb2c49dd5fdab2d5f54
|
[
"BSD-Source-Code"
] | null | null | null |
training/training_loop.py
|
igg002/stylegan2-ada-pytorch
|
f9e58945e599ce09d3a21bb2c49dd5fdab2d5f54
|
[
"BSD-Source-Code"
] | null | null | null |
training/training_loop.py
|
igg002/stylegan2-ada-pytorch
|
f9e58945e599ce09d3a21bb2c49dd5fdab2d5f54
|
[
"BSD-Source-Code"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, random_seed=0):
rnd = np.random.RandomState(random_seed)
gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
# No labels => show random subset of training samples.
if not training_set.has_labels:
all_indices = list(range(len(training_set)))
rnd.shuffle(all_indices)
grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
else:
# Group training samples by label.
label_groups = dict() # label => [idx, ...]
for idx in range(len(training_set)):
label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
if label not in label_groups:
label_groups[label] = []
label_groups[label].append(idx)
# Reorder.
label_order = sorted(label_groups.keys())
for label in label_order:
rnd.shuffle(label_groups[label])
# Organize into grid.
grid_indices = []
for y in range(gh):
label = label_order[y % len(label_order)]
indices = label_groups[label]
grid_indices += [indices[x % len(indices)] for x in range(gw)]
label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]
# Load data.
images, labels = zip(*[training_set[i] for i in grid_indices])
return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size, is_real=False, is_16_bit=False):
# Option 1) Convert fake images using min-max normalization (original)
# lo, hi = drange
# max_val = 65535 if is_16_bit else 255
# img = np.asarray(img, dtype=np.float32)
# img = (img - lo) * (max_val / (hi - lo))
# img = np.rint(img).clip(0, max_val)
# img = img.astype(np.uint16 if is_16_bit else np.uint8)
# Option 2) Convert fake images (expected range [-1, 1]) in the same way
# it is converted for generating samples
if not is_real:
max_val = 65535 if is_16_bit else 255
half_max = 32767.5 if is_16_bit else 127.5
img = (img * half_max + half_max).clip(0, max_val)
img = img.astype(np.uint16 if is_16_bit else np.uint8)
gw, gh = grid_size
_N, C, H, W = img.shape
img = img.reshape(gh, gw, C, H, W)
img = img.transpose(0, 3, 1, 4, 2)
img = img.reshape(gh * H, gw * W, C)
assert C in [1, 3]
if C == 1:
mode = 'I;16' if is_16_bit else 'L'
PIL.Image.fromarray(img[:, :, 0], mode).save(fname)
if C == 3:
PIL.Image.fromarray(img, 'RGB').save(fname)
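# A standalone sketch (not part of the training code) of the reshape used
# above: a batch of gw*gh images in NCHW layout rearranged into a single
# gh-by-gw grid image in HWC layout.
def _grid_reshape_demo(gw=3, gh=2, C=3, H=4, W=4):
    batch = np.zeros([gw * gh, C, H, W], dtype=np.uint8)
    grid = batch.reshape(gh, gw, C, H, W)   # rows, cols, channels, H, W
    grid = grid.transpose(0, 3, 1, 4, 2)    # rows, H, cols, W, channels
    grid = grid.reshape(gh * H, gw * W, C)  # final grid image
    return grid.shape                       # (8, 12, 3) with the defaults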
#----------------------------------------------------------------------------
def training_loop(
run_dir = '.', # Output directory.
training_set_kwargs = {}, # Options for training set.
data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
G_kwargs = {}, # Options for generator network.
D_kwargs = {}, # Options for discriminator network.
G_opt_kwargs = {}, # Options for generator optimizer.
D_opt_kwargs = {}, # Options for discriminator optimizer.
augment_kwargs = None, # Options for augmentation pipeline. None = disable.
loss_kwargs = {}, # Options for loss function.
metrics = [], # Metrics to evaluate during training.
random_seed = 0, # Global random seed.
num_gpus = 1, # Number of GPUs participating in the training.
rank = 0, # Rank of the current process in [0, num_gpus[.
batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
batch_gpu = 4, # Number of samples processed at a time by one GPU.
ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
ema_rampup = None, # EMA ramp-up coefficient.
G_reg_interval = 4, # How often to perform regularization for G? None = disable lazy regularization.
D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
augment_p = 0, # Initial value of augmentation probability.
ada_target = None, # ADA target value. None = fixed p.
ada_interval = 4, # How often to perform ADA adjustment?
ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
nimg = 0, # current image count
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
kimg_per_tick = 4, # Progress snapshot interval.
image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
resume_pkl = None, # Network pickle to resume training from.
cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
allow_tf32 = False, # Enable torch.backends.cuda.matmul.allow_tf32 and torch.backends.cudnn.allow_tf32?
abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
progress_fn = None, # Callback function for updating training progress. Called for all ranks.
):
# Initialize.
start_time = time.time()
device = torch.device('cuda', rank)
np.random.seed(random_seed * num_gpus + rank)
torch.manual_seed(random_seed * num_gpus + rank)
torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for matmul
torch.backends.cudnn.allow_tf32 = allow_tf32 # Allow PyTorch to internally use tf32 for convolutions
conv2d_gradfix.enabled = True # Improves training speed.
grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.
# Load training set.
if rank == 0:
print('Loading training set...')
training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
if rank == 0:
print()
print('Num images: ', len(training_set))
print('Image shape:', training_set.image_shape)
print('Label shape:', training_set.label_shape)
print()
# Construct networks.
if rank == 0:
print('Constructing networks...')
common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
G_ema = copy.deepcopy(G).eval()
G.update_epochs( float(100 * nimg / (total_kimg * 1000)) ) # 100 total top k "epochs" in total_kimg
print('starting G epochs: ',G.epochs)
# Resume from existing pickle.
if (resume_pkl is not None) and (rank == 0):
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
resume_data = legacy.load_network_pkl(f)
for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
# Print network summary tables.
if rank == 0:
z = torch.empty([batch_gpu, G.z_dim], device=device)
c = torch.empty([batch_gpu, G.c_dim], device=device)
img = misc.print_module_summary(G, [z, c])
misc.print_module_summary(D, [img, c])
# Setup augmentation.
if rank == 0:
print('Setting up augmentation...')
augment_pipe = None
ada_stats = None
if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
augment_pipe.p.copy_(torch.as_tensor(augment_p))
if ada_target is not None:
ada_stats = training_stats.Collector(regex='Loss/signs/real')
# Distribute across GPUs.
if rank == 0:
print(f'Distributing across {num_gpus} GPUs...')
ddp_modules = dict()
for name, module in [('G', G),('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('D', D), (None, G_ema), ('augment_pipe', augment_pipe)]:
if (num_gpus > 1) and (module is not None) and len(list(module.parameters())) != 0:
module.requires_grad_(True)
module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False)
module.requires_grad_(False)
if name is not None:
ddp_modules[name] = module
# Setup training phases.
if rank == 0:
print('Setting up training phases...')
loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs) # subclass of training.loss.Loss
phases = []
for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
if reg_interval is None:
opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
else: # Lazy regularization.
mb_ratio = reg_interval / (reg_interval + 1)
opt_kwargs = dnnlib.EasyDict(opt_kwargs)
opt_kwargs.lr = opt_kwargs.lr * mb_ratio
opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
for phase in phases:
phase.start_event = None
phase.end_event = None
if rank == 0:
phase.start_event = torch.cuda.Event(enable_timing=True)
phase.end_event = torch.cuda.Event(enable_timing=True)
# Export sample images.
grid_size = None
grid_z = None
grid_c = None
is_16_bit = training_set_kwargs.is_16_bit
if rank == 0:
print('Exporting sample images...')
grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
max_val = 65535 if is_16_bit else 255
save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,max_val], grid_size=grid_size, is_real=True, is_16_bit=is_16_bit)
grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size, is_16_bit=is_16_bit)
# Initialize logs.
if rank == 0:
print('Initializing logs...')
stats_collector = training_stats.Collector(regex='.*')
stats_metrics = dict()
stats_jsonl = None
stats_tfevents = None
if rank == 0:
stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
try:
import torch.utils.tensorboard as tensorboard
stats_tfevents = tensorboard.SummaryWriter(run_dir)
except ImportError as err:
print('Skipping tfevents export:', err)
# Train.
if rank == 0:
print(f'Training for {total_kimg} kimg...')
print()
cur_nimg = nimg
cur_tick = 0
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - start_time
batch_idx = 0
if progress_fn is not None:
progress_fn(0, total_kimg)
while True:
# Fetch training data.
with torch.autograd.profiler.record_function('data_fetch'):
phase_real_img, phase_real_c = next(training_set_iterator)
half_max = 32767.5 if is_16_bit else 127.5
phase_real_img = (phase_real_img.to(device).to(torch.float32) / half_max - 1).split(batch_gpu)
phase_real_c = phase_real_c.to(device).split(batch_gpu)
all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
# Execute training phases.
for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
if batch_idx % phase.interval != 0:
continue
G.update_epochs( float(100 * nimg / (total_kimg * 1000)) ) # 100 total top k "epochs" in total_kimg
# Initialize gradient accumulation.
if phase.start_event is not None:
phase.start_event.record(torch.cuda.current_stream(device))
phase.opt.zero_grad(set_to_none=True)
phase.module.requires_grad_(True)
# Accumulate gradients over multiple rounds.
for round_idx, (real_img, real_c, gen_z, gen_c) in enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c)):
sync = (round_idx == batch_size // (batch_gpu * num_gpus) - 1)
gain = phase.interval
loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, sync=sync, gain=gain)
# Update weights.
phase.module.requires_grad_(False)
with torch.autograd.profiler.record_function(phase.name + '_opt'):
for param in phase.module.parameters():
if param.grad is not None:
misc.nan_to_num(param.grad, nan=0, posinf=1e5, neginf=-1e5, out=param.grad)
phase.opt.step()
if phase.end_event is not None:
phase.end_event.record(torch.cuda.current_stream(device))
# Update G_ema.
with torch.autograd.profiler.record_function('Gema'):
ema_nimg = ema_kimg * 1000
if ema_rampup is not None:
ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
for p_ema, p in zip(G_ema.parameters(), G.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for b_ema, b in zip(G_ema.buffers(), G.buffers()):
b_ema.copy_(b)
# Update state.
cur_nimg += batch_size
batch_idx += 1
# Execute ADA heuristic.
if (ada_stats is not None) and (batch_idx % ada_interval == 0):
ada_stats.update()
adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
continue
# Print status line, accumulating the same information in stats_collector.
tick_end_time = time.time()
fields = []
fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
torch.cuda.reset_peak_memory_stats()
fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
if rank == 0:
print(' '.join(fields))
# Check for abort.
if (not done) and (abort_fn is not None) and abort_fn():
done = True
if rank == 0:
print()
print('Aborting...')
# Save image snapshot.
if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size, is_16_bit=is_16_bit)
# Save network snapshot.
snapshot_pkl = None
snapshot_data = None
if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]:
if module is not None:
if num_gpus > 1:
misc.check_ddp_consistency(module, ignore_regex=r'.*\.w_avg')
module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
snapshot_data[name] = module
del module # conserve memory
snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
if rank == 0:
with open(snapshot_pkl, 'wb') as f:
pickle.dump(snapshot_data, f)
# Evaluate metrics.
if (snapshot_data is not None) and (len(metrics) > 0):
if rank == 0:
print('Evaluating metrics...')
for metric in metrics:
result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
stats_metrics.update(result_dict.results)
del snapshot_data # conserve memory
# Collect statistics.
for phase in phases:
value = []
if (phase.start_event is not None) and (phase.end_event is not None):
phase.end_event.synchronize()
value = phase.start_event.elapsed_time(phase.end_event)
training_stats.report0('Timing/' + phase.name, value)
stats_collector.update()
stats_dict = stats_collector.as_dict()
# Update logs.
timestamp = time.time()
if stats_jsonl is not None:
fields = dict(stats_dict, timestamp=timestamp)
stats_jsonl.write(json.dumps(fields) + '\n')
stats_jsonl.flush()
if stats_tfevents is not None:
global_step = int(cur_nimg / 1e3)
walltime = timestamp - start_time
for name, value in stats_dict.items():
stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
for name, value in stats_metrics.items():
stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
stats_tfevents.flush()
if progress_fn is not None:
progress_fn(cur_nimg // 1000, total_kimg)
# Update state.
cur_tick += 1
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - tick_end_time
if done:
break
# Done.
if rank == 0:
print()
print('Exiting...')
#----------------------------------------------------------------------------
| 51.410835
| 168
| 0.624632
|
2c6f0943734bd4b24e17e4002f4794c03c29a425
| 16,608
|
py
|
Python
|
gradelib.py
|
luisxmetalx/xv6-SO2
|
e1eb50626029a4af1e9f034a467119f4dc7f1d0b
|
[
"MIT-0"
] | null | null | null |
gradelib.py
|
luisxmetalx/xv6-SO2
|
e1eb50626029a4af1e9f034a467119f4dc7f1d0b
|
[
"MIT-0"
] | null | null | null |
gradelib.py
|
luisxmetalx/xv6-SO2
|
e1eb50626029a4af1e9f034a467119f4dc7f1d0b
|
[
"MIT-0"
] | null | null | null |
from __future__ import print_function
import sys, os, re, time, socket, select, subprocess, errno, shutil
from subprocess import check_call, Popen
from optparse import OptionParser
__all__ = []
##################################################################
# Test structure
#
__all__ += ["test", "end_part", "run_tests", "get_current_test"]
TESTS = []
TOTAL = POSSIBLE = 0
PART_TOTAL = PART_POSSIBLE = 0
CURRENT_TEST = None
def test(points, title=None, parent=None):
"""Decorator for declaring test functions. If title is None, the
title of the test will be derived from the function name by
stripping the leading "test_" and replacing underscores with
spaces."""
def register_test(fn, title=title):
if not title:
assert fn.__name__.startswith("test_")
title = fn.__name__[5:].replace("_", " ")
if parent:
title = " " + title
def run_test():
global TOTAL, POSSIBLE, CURRENT_TEST
# Handle test dependencies
if run_test.complete:
return
run_test.complete = True
if parent:
parent()
# Run the test
fail = None
start = time.time()
CURRENT_TEST = run_test
sys.stdout.write("%s: " % title)
sys.stdout.flush()
try:
fn()
except AssertionError as e:
fail = str(e)
# Display and handle test result
POSSIBLE += points
if points:
print("%s" % \
(color("red", "FAIL") if fail else color("green", "OK")), end=' ')
if time.time() - start > 0.1:
print("(%.1fs)" % (time.time() - start), end=' ')
print()
if fail:
print(" %s" % fail.replace("\n", "\n "))
else:
TOTAL += points
for callback in run_test.on_finish:
callback(fail)
CURRENT_TEST = None
# Record test metadata on the test wrapper function
run_test.__name__ = fn.__name__
run_test.title = title
run_test.complete = False
run_test.on_finish = []
TESTS.append(run_test)
return run_test
return register_test
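# Usage sketch for the decorator above (illustrative only; wrapping it in a
# helper keeps the example from registering a stray entry in TESTS when this
# module is imported):
def _example_test_registration():
    @test(5, "example arithmetic")
    def test_example_arithmetic():
        assert_equal(1 + 1, 2)
    return test_example_arithmetic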
def end_part(name):
def show_part():
global PART_TOTAL, PART_POSSIBLE
print("Part %s score: %d/%d" % \
(name, TOTAL - PART_TOTAL, POSSIBLE - PART_POSSIBLE))
print()
PART_TOTAL, PART_POSSIBLE = TOTAL, POSSIBLE
show_part.title = ""
TESTS.append(show_part)
def run_tests():
"""Set up for testing and run the registered test functions."""
# Handle command line
global options
parser = OptionParser(usage="usage: %prog [-v] [filters...]")
parser.add_option("-v", "--verbose", action="store_true",
help="print commands")
parser.add_option("--color", choices=["never", "always", "auto"],
default="auto", help="never, always, or auto")
(options, args) = parser.parse_args()
# Start with a full build to catch build errors
make()
# Clean the file system if there is one
reset_fs()
# Run tests
limit = list(map(str.lower, args))
try:
for test in TESTS:
if not limit or any(l in test.title.lower() for l in limit):
test()
if not limit:
print("Score: %d/%d" % (TOTAL, POSSIBLE))
except KeyboardInterrupt:
pass
if TOTAL < POSSIBLE:
sys.exit(1)
def get_current_test():
if not CURRENT_TEST:
raise RuntimeError("No test is running")
return CURRENT_TEST
##################################################################
# Assertions
#
__all__ += ["assert_equal", "assert_lines_match"]
def assert_equal(got, expect, msg=""):
if got == expect:
return
if msg:
msg += "\n"
raise AssertionError("%sgot:\n %s\nexpected:\n %s" %
(msg, str(got).replace("\n", "\n "),
str(expect).replace("\n", "\n ")))
def assert_lines_match(text, *regexps, **kw):
"""Assert that all of regexps match some line in text. If a 'no'
keyword argument is given, it must be a list of regexps that must
*not* match any line in text."""
def assert_lines_match_kw(no=[]):
return no
no = assert_lines_match_kw(**kw)
# Check text against regexps
lines = text.splitlines()
good = set()
bad = set()
for i, line in enumerate(lines):
if any(re.match(r, line) for r in regexps):
good.add(i)
regexps = [r for r in regexps if not re.match(r, line)]
if any(re.match(r, line) for r in no):
bad.add(i)
if not regexps and not bad:
return
# We failed; construct an informative failure message
show = set()
for lineno in good.union(bad):
for offset in range(-2, 3):
show.add(lineno + offset)
if regexps:
show.update(n for n in range(len(lines) - 5, len(lines)))
msg = []
last = -1
for lineno in sorted(show):
if 0 <= lineno < len(lines):
if lineno != last + 1:
msg.append("...")
last = lineno
msg.append("%s %s" % (color("red", "BAD ") if lineno in bad else
color("green", "GOOD") if lineno in good
else " ",
lines[lineno]))
if last != len(lines) - 1:
msg.append("...")
if bad:
msg.append("unexpected lines in output")
for r in regexps:
msg.append(color("red", "MISSING") + " '%s'" % r)
raise AssertionError("\n".join(msg))
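# A small sketch (not part of the original grader) of how assert_lines_match
# is typically used; the sample output and patterns below are hypothetical.
def _assert_lines_match_demo():
    sample = "booting xv6...\ninit: starting sh\n$ "
    assert_lines_match(sample,
                       r"booting xv6",        # each regexp must match a line
                       r"init: starting sh",
                       no=[r"panic"])         # and none of these may match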
##################################################################
# Utilities
#
__all__ += ["make", "maybe_unlink", "reset_fs", "color"]
MAKE_TIMESTAMP = 0
def pre_make():
"""Delay prior to running make to ensure file mtimes change."""
while int(time.time()) == MAKE_TIMESTAMP:
time.sleep(0.1)
def post_make():
"""Record the time after make completes so that the next run of
make can be delayed if needed."""
global MAKE_TIMESTAMP
MAKE_TIMESTAMP = int(time.time())
def make(*target):
pre_make()
if Popen(("make",) + target).wait():
sys.exit(1)
post_make()
def show_command(cmd):
from pipes import quote
print("\n$", " ".join(map(quote, cmd)))
def maybe_unlink(*paths):
for path in paths:
try:
os.unlink(path)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
COLORS = {"default": "\033[0m", "red": "\033[31m", "green": "\033[32m"}
def color(name, text):
if options.color == "always" or (options.color == "auto" and os.isatty(1)):
return COLORS[name] + text + COLORS["default"]
return text
def reset_fs():
if os.path.exists("obj/fs/clean-fs.img"):
shutil.copyfile("obj/fs/clean-fs.img", "obj/fs/fs.img")
##################################################################
# Controllers
#
__all__ += ["QEMU", "GDBClient"]
class QEMU(object):
_GDBPORT = None
def __init__(self, *make_args):
# Check that QEMU is not currently running
try:
GDBClient(self.get_gdb_port(), timeout=0).close()
except socket.error:
pass
else:
print("""\
GDB stub found on port %d.
QEMU appears to already be running. Please exit it if possible or use
'killall qemu' or 'killall qemu.real'.""" % self.get_gdb_port(), file=sys.stderr)
sys.exit(1)
if options.verbose:
show_command(("make",) + make_args)
cmd = ("make", "-s", "--no-print-directory") + make_args
self.proc = Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE)
self.output = ""
self.on_output = []
@staticmethod
def get_gdb_port():
if QEMU._GDBPORT is None:
p = Popen(["make", "-s", "--no-print-directory", "print-gdbport"],
stdout=subprocess.PIPE)
(out, _) = p.communicate()
if p.returncode:
raise RuntimeError(
"Failed to get gdbport: make exited with %d" %
p.returncode)
QEMU._GDBPORT = int(out)
return QEMU._GDBPORT
def fileno(self):
if self.proc:
return self.proc.stdout.fileno()
def handle_read(self):
buf = os.read(self.proc.stdout.fileno(), 4096).decode("utf-8")
self.output += buf
for callback in self.on_output:
callback(buf)
if buf == "":
self.wait()
return
def wait(self):
if self.proc:
self.proc.wait()
self.proc = None
class GDBClient(object):
def __init__(self, port, timeout=15):
start = time.time()
while True:
self.sock = socket.socket()
try:
self.sock.settimeout(1)
self.sock.connect(("localhost", port))
break
except socket.error:
if time.time() >= start + timeout:
raise
self.__buf = ""
def fileno(self):
if self.sock:
return self.sock.fileno()
def handle_read(self):
try:
data = self.sock.recv(4096).decode("utf-8")
except socket.error:
data = ""
if data == "":
self.sock.close()
self.sock = None
return
self.__buf += data
while True:
m = re.search(r"\$([^#]*)#[0-9a-zA-Z]{2}", self.__buf)
if not m:
break
pkt = m.group(1)
self.__buf = self.__buf[m.end():]
if pkt.startswith("T05"):
# Breakpoint
raise TerminateTest
def __send(self, cmd):
packet = "$%s#%02x" % (cmd, sum(map(ord, cmd)) % 256)
self.sock.sendall(packet.encode("utf-8"))
def __send_break(self):
self.sock.sendall(b"\x03")
def close(self):
if self.sock:
self.sock.close()
def cont(self):
self.__send("c")
def breakpoint(self, addr):
self.__send("Z1,%x,1" % addr)
def kill(self):
self.__send_break()
self.__send("k")
##################################################################
# QEMU test runner
#
__all__ += ["TerminateTest", "Runner"]
class TerminateTest(Exception):
pass
class Runner():
def __init__(self, *default_monitors):
self.__default_monitors = default_monitors
def run_qemu(self, *monitors, **kw):
"""Run a QEMU-based test. monitors should functions that will
be called with this Runner instance once QEMU and GDB are
started. Typically, they should register callbacks that throw
TerminateTest when stop events occur. The target_base
argument gives the make target to run. The make_args argument
should be a list of additional arguments to pass to make. The
timeout argument bounds how long to run before returning."""
def run_qemu_kw(target_base="qemu", make_args=[], timeout=30):
return target_base, make_args, timeout
target_base, make_args, timeout = run_qemu_kw(**kw)
# Start QEMU
pre_make()
self.qemu = QEMU(target_base + "-nox-gdb", *make_args)
self.gdb = None
try:
# Wait for QEMU to start or make to fail. This will set
# self.gdb if QEMU starts.
self.qemu.on_output = [self.__monitor_start]
self.__react([self.qemu], timeout=30)
self.qemu.on_output = []
if self.gdb is None:
print("Failed to connect to QEMU; output:")
print(self.qemu.output)
sys.exit(1)
post_make()
# QEMU and GDB are up
self.reactors = [self.qemu, self.gdb]
# Start monitoring
for m in self.__default_monitors + monitors:
m(self)
# Run and react
self.gdb.cont()
self.__react(self.reactors, timeout)
finally:
# Shutdown QEMU
try:
if self.gdb is None:
sys.exit(1)
self.gdb.kill()
self.__react(self.reactors, 5)
self.gdb.close()
self.qemu.wait()
except:
print("""\
Failed to shutdown QEMU. You might need to 'killall qemu' or
'killall qemu.real'.
""")
raise
def __monitor_start(self, output):
if "\n" in output:
try:
self.gdb = GDBClient(self.qemu.get_gdb_port(), timeout=2)
raise TerminateTest
except socket.error:
pass
if output == "":
raise TerminateTest
def __react(self, reactors, timeout):
deadline = time.time() + timeout
try:
while True:
timeleft = deadline - time.time()
if timeleft < 0:
sys.stdout.write("Timeout! ")
sys.stdout.flush()
return
rset = [r for r in reactors if r.fileno() is not None]
if not rset:
return
rset, _, _ = select.select(rset, [], [], timeleft)
for reactor in rset:
reactor.handle_read()
except TerminateTest:
pass
def user_test(self, binary, *monitors, **kw):
"""Run a user test using the specified binary. Monitors and
keyword arguments are as for run_qemu. This runs on a disk
snapshot unless the keyword argument 'snapshot' is False."""
maybe_unlink("obj/kern/init.o", "obj/kern/kernel")
if kw.pop("snapshot", True):
kw.setdefault("make_args", []).append("QEMUEXTRA+=-snapshot")
self.run_qemu(target_base="run-%s" % binary, *monitors, **kw)
def match(self, *args, **kwargs):
"""Shortcut to call assert_lines_match on the most recent QEMU
output."""
assert_lines_match(self.qemu.output, *args, **kwargs)
##################################################################
# Monitors
#
__all__ += ["save", "stop_breakpoint", "call_on_line", "stop_on_line"]
def save(path):
"""Return a monitor that writes QEMU's output to path. If the
test fails, copy the output to path.test-name."""
def setup_save(runner):
f.seek(0)
f.truncate()
runner.qemu.on_output.append(f.write)
get_current_test().on_finish.append(save_on_finish)
def save_on_finish(fail):
f.flush()
save_path = path + "." + get_current_test().__name__[5:]
if fail:
shutil.copyfile(path, save_path)
print(" QEMU output saved to %s" % save_path)
elif os.path.exists(save_path):
os.unlink(save_path)
print(" (Old %s failure log removed)" % save_path)
f = open(path, "w")
return setup_save
def stop_breakpoint(addr):
"""Returns a monitor that stops when addr is reached. addr may be
a number or the name of a symbol."""
def setup_breakpoint(runner):
if isinstance(addr, str):
addrs = [int(sym[:8], 16) for sym in open("obj/kern/kernel.sym")
if sym[11:].strip() == addr]
assert len(addrs), "Symbol %s not found" % addr
runner.gdb.breakpoint(addrs[0])
else:
runner.gdb.breakpoint(addr)
return setup_breakpoint
def call_on_line(regexp, callback):
"""Returns a monitor that calls 'callback' when QEMU prints a line
matching 'regexp'."""
def setup_call_on_line(runner):
buf = [""]
def handle_output(output, buf=buf):
buf[0] += output
while "\n" in buf[0]:
line, buf[0] = buf[0].split("\n", 1)
if re.match(regexp, line):
callback(line)
runner.qemu.on_output.append(handle_output)
return setup_call_on_line
def stop_on_line(regexp):
"""Returns a monitor that stops when QEMU prints a line matching
'regexp'."""
def stop(line):
raise TerminateTest
return call_on_line(regexp, stop)
| 30.642066
| 86
| 0.533538
|
66fdf78239c06e875544d8e349810088540f2390
| 707
|
py
|
Python
|
pincer/objects/message/reaction.py
|
Arthurdw/Pincer
|
eebb8e8f4e7173ba37b8d3049c1d7de793776ed5
|
[
"MIT"
] | 118
|
2021-08-30T15:00:47.000Z
|
2022-03-31T11:06:16.000Z
|
pincer/objects/message/reaction.py
|
Arthurdw/Pincer
|
eebb8e8f4e7173ba37b8d3049c1d7de793776ed5
|
[
"MIT"
] | 343
|
2021-08-30T12:25:57.000Z
|
2022-03-31T07:02:11.000Z
|
pincer/objects/message/reaction.py
|
Arthurdw/Pincer
|
eebb8e8f4e7173ba37b8d3049c1d7de793776ed5
|
[
"MIT"
] | 62
|
2021-08-31T22:30:20.000Z
|
2022-03-25T18:29:11.000Z
|
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
if TYPE_CHECKING:
from .emoji import Emoji
@dataclass(repr=False)
class Reaction(APIObject):
"""Represents a Discord Reaction object
Attributes
----------
count: :class:`int`
Times this emoji has been used to react
me: :class:`bool`
Whether the current user reacted using this emoji
emoji: :class:`~pincer.objects.message.emoji.Emoji`
Emoji information
"""
count: int
me: bool
emoji: Emoji
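# A small usage sketch (not part of the library); it only reads the fields
# documented above.
def _describe_reaction(reaction: Reaction) -> str:
    who = "including you" if reaction.me else "not including you"
    return f"{reaction.count} reaction(s), {who}"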
| 22.806452
| 65
| 0.698727
|
a7d397d584ec06c416600fe3ae9ee495b87d62a7
| 407
|
py
|
Python
|
my_meal_planner/recipes/migrations/0008_auto_20210125_1519.py
|
andydandy21/cookbook
|
75066d990eacd7317387050c99fcc17962f535e6
|
[
"MIT"
] | null | null | null |
my_meal_planner/recipes/migrations/0008_auto_20210125_1519.py
|
andydandy21/cookbook
|
75066d990eacd7317387050c99fcc17962f535e6
|
[
"MIT"
] | null | null | null |
my_meal_planner/recipes/migrations/0008_auto_20210125_1519.py
|
andydandy21/cookbook
|
75066d990eacd7317387050c99fcc17962f535e6
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-25 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0007_recipe_instructions'),
]
operations = [
migrations.AlterField(
model_name='recipe',
name='instructions',
field=models.TextField(default='', max_length=1000),
),
]
| 21.421053
| 64
| 0.60688
|
df4c7088327c4f142f18b2f7d723c839c73e7a2f
| 1,958
|
py
|
Python
|
calendar_api.py
|
adidiac/events-from-gmail-to-calendar
|
9ea4e05b3695c6ba4742bd23864a121a4d61afa7
|
[
"MIT"
] | null | null | null |
calendar_api.py
|
adidiac/events-from-gmail-to-calendar
|
9ea4e05b3695c6ba4742bd23864a121a4d61afa7
|
[
"MIT"
] | null | null | null |
calendar_api.py
|
adidiac/events-from-gmail-to-calendar
|
9ea4e05b3695c6ba4742bd23864a121a4d61afa7
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
import os
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
def read_data():
if os.stat("events.txt").st_size != 0:
with open('events.txt') as json_file:
data = json.load(json_file)
return data
return 0
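# A sketch (not part of the original script) of what events.txt is expected
# to hold: a JSON list of Google Calendar "events.insert" request bodies.
# The concrete values here are hypothetical.
EXAMPLE_EVENTS = [
    {
        'summary': 'Team meeting',
        'start': {'dateTime': '2021-03-01T10:00:00-07:00'},
        'end': {'dateTime': '2021-03-01T11:00:00-07:00'},
    },
]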
def main(data):
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token_calendar.pickle'):
with open('token_calendar.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials_calendar.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token_calendar.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
    if data != 0:
for i in data:
event = i
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
if __name__ == '__main__':
data=read_data()
main(data)
| 36.943396
| 88
| 0.6476
|
109379d4cd8c428aa6ed6d84b9930ee32de2781b
| 1,293
|
py
|
Python
|
climate_util.py
|
System-Exit/IoT-Climate
|
feffd57cb87286ee9006e662144ab2483f5bd79b
|
[
"MIT"
] | null | null | null |
climate_util.py
|
System-Exit/IoT-Climate
|
feffd57cb87286ee9006e662144ab2483f5bd79b
|
[
"MIT"
] | null | null | null |
climate_util.py
|
System-Exit/IoT-Climate
|
feffd57cb87286ee9006e662144ab2483f5bd79b
|
[
"MIT"
] | null | null | null |
import urllib.request
import os
import sense_hat
# Class containing utility methods for multiple other classes
class ClimateUtil:
# Returns true if able to connect to google, otherwise false
@staticmethod
def checkConnection():
# Attempt connection
try:
host = urllib.request.urlopen("https://www.google.com")
# Since connection was successful, return True
return True
except:
# Since connection failed, return False
return False
# Get calibrated temperature
# Reference: Week 4 Sensehat calibration example
@staticmethod
def getCalibratedTemp(sense):
# Start up pressure sensor as to avoid values of 0
sense.get_temperature_from_pressure()
# Get temperature from humidity and pressure
htemp = sense.get_temperature_from_humidity()
ptemp = sense.get_temperature_from_pressure()
# Get CPU temperature
res = os.popen("vcgencmd measure_temp").readline()
temp_cpu = float(res.replace("temp=", "").replace("'C\n", ""))
# Calculate calibrated temperature
temp = (htemp + ptemp) / 2
temp_calibrated = temp - ((temp_cpu - temp) / 1.5)
# Return calibrated temperature
return temp_calibrated
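# A worked example of the same calibration formula (a sketch, separate from
# the class above) with made-up readings: (34.0 + 34.4) / 2 = 34.2, and
# 34.2 - ((52.0 - 34.2) / 1.5) is roughly 22.3 degrees C.
def _calibration_example(htemp=34.0, ptemp=34.4, temp_cpu=52.0):
    temp = (htemp + ptemp) / 2
    return temp - ((temp_cpu - temp) / 1.5)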
| 34.945946
| 70
| 0.653519
|
a4638c40dba3e6b23433af012afb49e569ce8070
| 699
|
py
|
Python
|
checkov/common/util/banner.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/common/util/banner.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/common/util/banner.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
from termcolor import colored
from checkov.version import version
from checkov.common.version_manager import check_for_update
banner = r"""
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: {} """.format(version)
new_version = check_for_update("checkov", version)
if new_version:
banner = "\n" + banner + "\nUpdate available " + colored(version,"grey") + " -> " + colored(new_version, "green") + "\nRun " + colored(
"pip3 install -U checkov", "magenta") + " to update \n"
| 36.789474
| 139
| 0.537911
|
5456daa8cdc537fcf6d3637aa68228e259743d5b
| 670
|
py
|
Python
|
Anime API/app_helper.py
|
vaiiiloz/AIP391_nhom3
|
180d1e55825e44674dc2273b4ee8a8a643d158e4
|
[
"MIT"
] | null | null | null |
Anime API/app_helper.py
|
vaiiiloz/AIP391_nhom3
|
180d1e55825e44674dc2273b4ee8a8a643d158e4
|
[
"MIT"
] | null | null | null |
Anime API/app_helper.py
|
vaiiiloz/AIP391_nhom3
|
180d1e55825e44674dc2273b4ee8a8a643d158e4
|
[
"MIT"
] | null | null | null |
from Pipeline import Pipe
import os
import pandas as pd
pipe = Pipe()
outputPath = r".\templates"
df = pd.read_csv('Anime_df.csv')
def get_image(image_path, img_name):
    # Run the recognition pipeline on image_path, save the annotated image
    # under the templates folder, and build an info string for each
    # recognised character before removing the uploaded file.
#try:
image, top1, top5 = pipe.process(image_path)
#except:
# print('Mother Fucker')
image.save(os.path.join(outputPath, img_name))
info = ''
for name in top1:
index = df['Name'][df['Name'] == name].index[0]
orgin = df['Orgin'][index]
gender = df['Gender'][index]
isMain = df['IsMain'][index]
info += 'Name:{}, From anime:{}, Sex:{}, isMain:{};\\n\\n'.format(name, orgin, gender, isMain)
info = info[:-4]
os.remove(image_path)
image.close()
del image
return info, img_name
| 23.103448
| 96
| 0.658209
|
3746518731c439af7944e88c40332d184d1f7107
| 736
|
py
|
Python
|
test_project/pandas_practice6.py
|
BillyLiou/python_test_project
|
e266e2eb627e268a390f1ffc6717979ac44254ad
|
[
"MIT"
] | null | null | null |
test_project/pandas_practice6.py
|
BillyLiou/python_test_project
|
e266e2eb627e268a390f1ffc6717979ac44254ad
|
[
"MIT"
] | null | null | null |
test_project/pandas_practice6.py
|
BillyLiou/python_test_project
|
e266e2eb627e268a390f1ffc6717979ac44254ad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : pandas_practice6.py
@Time : 2021/02/14 23:54:44
@Author : Billy Liu
@Version : 1.0
@Contact : billy@test.mail
@Desc : None
'''
import pandas as pd
import numpy as np
car_name = ['BMW','TOYOTA','MERCEDEZ']
res_list = []
for i in range(10):
temp_list = [
car_name[np.random.randint(low=0,high=3)],
np.random.randint(low=1,high=1001)
]
res_list.append(temp_list)
# The lines below group the rows by brand name and sum the amount for each brand
df = pd.DataFrame(res_list,columns=['name','amount'])
df_sum = df.groupby(['name']).sum()
print(df_sum)
print('====================')
df_count = df.groupby(['name'],as_index=False)['name'].agg({'Count':'count'})
print(df_count)
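# A sketch of an equivalent single aggregation (not part of the original
# exercise): sum and count per brand in one groupby call.
df_summary = df.groupby('name')['amount'].agg(['sum', 'count'])
print(df_summary)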
| 22.30303
| 77
| 0.619565
|
157d4ba0c30990866462895456cb7b086323d9c8
| 1,326
|
py
|
Python
|
L2.23NovQ1.py
|
aetooc/PF-Lab-Tasks
|
17899595e948624370b754103be28eb93f236a14
|
[
"MIT"
] | null | null | null |
L2.23NovQ1.py
|
aetooc/PF-Lab-Tasks
|
17899595e948624370b754103be28eb93f236a14
|
[
"MIT"
] | null | null | null |
L2.23NovQ1.py
|
aetooc/PF-Lab-Tasks
|
17899595e948624370b754103be28eb93f236a14
|
[
"MIT"
] | null | null | null |
a=[1,3,5] # first list a
b=[2,4,6] # second list b
c=[] # new list c
print("a =",a)
print("b =",b)
#part (1)
for i in a:
c.append(i) # Appending elements of a in c
for j in b:
c.append(j) # Appending elements of b in c
print('Step:1 c=',c)# New combined list c
print(' ') # For spacing between lines
#part (2)
num = 0
for k in c:
num += k # Sum of elements of c
avg = num /6 # For Average of elements of c
print('Step:2',"Average of c list's elements:",avg)# Average of elements of c
print(' ') # For spacing between lines
#part (3)
c.insert(3,42) # Inserting 42 as a 4th element in c
print('Step:3',c)
print(' ') # For spacing between lines
#part(4)
c.append(7) # Appending 7 in c
c.append(8) # Appending 8 in c
c.append(9) # Appending 9 in c
print('Step:4',c)
print(' ') # For spacing between lines
#part (5)
print('Step:5','First two elements of c')
counter=0
for i in c: # Using For loop
counter += 1
if counter > 2:
break
else:
print(i)
#part(6)
print('Step:6 Last element of b =',b[-1]) # Printing the last element of b
print(' ') # For spacing between lines
#part(7)
counter= 0
for i in a: # Using For loop
counter+=1 # Using this to find length of a
print('Step:7 Length of `a` is =',counter)
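# For reference only (outside the original exercise): the built-in len()
# returns the same length in a single step.
print('Check: len(a) =', len(a))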
| 25.5
| 77
| 0.597285
|
5c3eeaf8a54594a31e42d4d52080639b767bfc84
| 6,450
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/authorization/v20180301/get_policy_set_definition_at_management_group.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/authorization/v20180301/get_policy_set_definition_at_management_group.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/authorization/v20180301/get_policy_set_definition_at_management_group.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPolicySetDefinitionAtManagementGroupResult',
'AwaitableGetPolicySetDefinitionAtManagementGroupResult',
'get_policy_set_definition_at_management_group',
]
@pulumi.output_type
class GetPolicySetDefinitionAtManagementGroupResult:
"""
The policy set definition.
"""
def __init__(__self__, description=None, display_name=None, id=None, metadata=None, name=None, parameters=None, policy_definitions=None, policy_type=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
pulumi.set(__self__, "metadata", metadata)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parameters and not isinstance(parameters, dict):
raise TypeError("Expected argument 'parameters' to be a dict")
pulumi.set(__self__, "parameters", parameters)
if policy_definitions and not isinstance(policy_definitions, list):
raise TypeError("Expected argument 'policy_definitions' to be a list")
pulumi.set(__self__, "policy_definitions", policy_definitions)
if policy_type and not isinstance(policy_type, str):
raise TypeError("Expected argument 'policy_type' to be a str")
pulumi.set(__self__, "policy_type", policy_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the policy set definition.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the policy set definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> Sequence['outputs.PolicyDefinitionReferenceResponse']:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[str]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource (Microsoft.Authorization/policySetDefinitions).
"""
return pulumi.get(self, "type")
class AwaitableGetPolicySetDefinitionAtManagementGroupResult(GetPolicySetDefinitionAtManagementGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPolicySetDefinitionAtManagementGroupResult(
description=self.description,
display_name=self.display_name,
id=self.id,
metadata=self.metadata,
name=self.name,
parameters=self.parameters,
policy_definitions=self.policy_definitions,
policy_type=self.policy_type,
type=self.type)
def get_policy_set_definition_at_management_group(management_group_id: Optional[str] = None,
policy_set_definition_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicySetDefinitionAtManagementGroupResult:
"""
The policy set definition.
:param str management_group_id: The ID of the management group.
:param str policy_set_definition_name: The name of the policy set definition to get.
"""
__args__ = dict()
__args__['managementGroupId'] = management_group_id
__args__['policySetDefinitionName'] = policy_set_definition_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:authorization/v20180301:getPolicySetDefinitionAtManagementGroup', __args__, opts=opts, typ=GetPolicySetDefinitionAtManagementGroupResult).value
return AwaitableGetPolicySetDefinitionAtManagementGroupResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
metadata=__ret__.metadata,
name=__ret__.name,
parameters=__ret__.parameters,
policy_definitions=__ret__.policy_definitions,
policy_type=__ret__.policy_type,
type=__ret__.type)
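# A minimal usage sketch (not generated code); the management group ID and
# policy set definition name below are hypothetical.
def _example_lookup() -> AwaitableGetPolicySetDefinitionAtManagementGroupResult:
    return get_policy_set_definition_at_management_group(
        management_group_id='my-management-group',
        policy_set_definition_name='my-policy-set-definition')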
| 37.719298
| 194
| 0.662326
|
369dd8901703ca342588a76afd8b6db4c3cab414
| 6,045
|
py
|
Python
|
xos/services/mcordservice/models.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/services/mcordservice/models.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/services/mcordservice/models.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
from django.forms.models import model_to_dict
from django.db.models import Q
from operator import itemgetter, attrgetter, methodcaller
import traceback
from xos.exceptions import *
from core.models import SlicePrivilege, SitePrivilege
from sets import Set
MCORD_KIND = "mcordservice"
# The class to represent the service. Most of the service logic is given to us
# in the Service class, but we have some configuration that is specific to
# this example.
class MCORDService(Service):
KIND = MCORD_KIND
class Meta:
# When the proxy field is set to True the model is represented as
# its superclass in the database, but we can still change the python
# behavior. In this case MCORDService is a Service in the
# database.
proxy = True
# The name used to find this service; all directories are named this
app_label = "mcordservice"
verbose_name = "MCORD Service"
# This is the class to represent the tenant. Most of the logic is given to us
# in TenantWithContainer; however, there is some configuration and logic that
# we need to define for this example.
class MCORDServiceComponent(TenantWithContainer):
class Meta:
# Same as above, MCORDServiceComponent is represented as a
# TenantWithContainer, but we change the python behavior.
proxy = True
verbose_name = "MCORD Service Component"
# The kind of the service is used on forms to differentiate this service
# from the other services.
KIND = MCORD_KIND
# Ansible requires that the sync_attributes field contain nat_ip and nat_mac
# these will be used to determine where to SSH to for ansible.
# Getters must be defined for every attribute specified here.
sync_attributes = ("private_ip", "private_mac",
"mcord_ip", "mcord_mac",
"nat_ip", "nat_mac",)
# default_attributes is used to cleanly indicate what the default values for
# the fields are.
default_attributes = {'display_message': 'Hello MCORD!'}
def __init__(self, *args, **kwargs):
mcord_services = MCORDService.get_service_objects().all()
# When the tenant is created, the default service in the form is set
# to be the first created MCORDService
if mcord_services:
self._meta.get_field(
"provider_service").default = mcord_services[0].id
super(MCORDServiceComponent, self).__init__(*args, **kwargs)
def can_update(self, user):
# Allow creation of this model's instances for non-admin users as well
return True
def save(self, *args, **kwargs):
if not self.creator:
if not getattr(self, "caller", None):
# caller must be set when creating a service component since it creates a slice
raise XOSProgrammingError("ServiceComponent's self.caller was not set")
self.creator = self.caller
if not self.creator:
raise XOSProgrammingError("ServiceComponents's self.creator was not set")
super(MCORDServiceComponent, self).save(*args, **kwargs)
# This call needs to happen so that an instance is created for this
# tenant in the slice. One instance is created per tenant.
model_policy_mcord_servicecomponent(self.pk)
def delete(self, *args, **kwargs):
# Delete the instance that was created for this tenant
self.cleanup_container()
super(MCORDServiceComponent, self).delete(*args, **kwargs)
# Getter for the message that will appear on the webpage.
# By default it is "Hello MCORD!"
@property
def display_message(self):
return self.get_attribute(
"display_message",
self.default_attributes['display_message'])
# Setter for the message that will appear on the webpage
@display_message.setter
def display_message(self, value):
self.set_attribute("display_message", value)
@property
def addresses(self):
if (not self.id) or (not self.instance):
return {}
addresses = {}
for ns in self.instance.ports.all():
if "private" in ns.network.name.lower():
addresses["private"] = (ns.ip, ns.mac)
elif "nat" in ns.network.name.lower():
addresses["nat"] = (ns.ip, ns.mac)
elif "mcord_service_internal_net" in ns.network.labels.lower():
addresses["mcordservice"] = (ns.ip, ns.mac)
return addresses
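# Example of the mapping returned above (addresses are illustrative values only):
#   {"private": ("10.0.1.5", "fa:16:3e:aa:bb:cc"),
#    "nat": ("172.16.0.9", "fa:16:3e:dd:ee:ff"),
#    "mcordservice": ("192.168.0.3", "fa:16:3e:11:22:33")}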
# This getter is necessary because nat_ip is a sync_attribute
@property
def nat_ip(self):
return self.addresses.get("nat", (None, None))[0]
# This getter is necessary because nat_mac is a sync_attribute
@property
def nat_mac(self):
return self.addresses.get("nat", (None, None))[1]
@property
def private_ip(self):
return self.addresses.get("nat", (None, None))[0]
@property
def private_mac(self):
return self.addresses.get("nat", (None, None))[1]
@property
def mcord_ip(self):
return self.addresses.get("nat", (None, None))[0]
@property
def mcord_mac(self):
return self.addresses.get("nat", (None, None))[1]
def model_policy_mcord_servicecomponent(pk):
# This section of code is atomic to prevent race conditions
with transaction.atomic():
# We find all of the tenants that are waiting to update
component = MCORDServiceComponent.objects.select_for_update().filter(pk=pk)
if not component:
return
# Since this code is atomic it is safe to always use the first tenant
component = component[0]
component.manage_container()
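# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; assumes an MCORDService and a User
# already exist -- the variable names below are placeholders):
#
#   service = MCORDService.get_service_objects().all()[0]
#   component = MCORDServiceComponent(provider_service=service)
#   component.caller = some_user            # required before save(), see above
#   component.display_message = "Hello from MCORD"
#   component.save()   # triggers model_policy_mcord_servicecomponent(component.pk)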
| 38.503185
| 128
| 0.673118
|
797591db6cd308daf69fc4773ed6b13c47e165b0
| 28,432
|
py
|
Python
|
opentamp/core/util_classes/openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | 4
|
2022-02-13T15:52:18.000Z
|
2022-03-26T17:33:13.000Z
|
opentamp/core/util_classes/openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | 1
|
2022-02-13T22:48:09.000Z
|
2022-02-13T22:48:09.000Z
|
opentamp/core/util_classes/openrave_body.py
|
Algorithmic-Alignment-Lab/openTAMP
|
f0642028d551d0436b3a3dbc3bfb2f23a00adc14
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import cos, sin, atan2
import pybullet as P
import opentamp.core.util_classes.common_constants as const
import opentamp.core.util_classes.transform_utils as T
from opentamp.core.util_classes.robots import Robot, PR2, Baxter, Washer, NAMO
from opentamp.core.util_classes.items import Item, Box, Can, BlueCan, RedCan, Circle, \
BlueCircle, RedCircle, GreenCircle, Obstacle, \
Wall, Table, Basket, XMLItem, Door
WALL_THICKNESS = 1
CLOSET_POINTS = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
CLOSET_POINTS = [[-7.0,-9.0],[-7.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[14.0,4.0],[14.0,-9.0],[-7.0,-9.0]]
CLOSET_POINTS = [[-7.0,-10.0],[-7.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[14.0,4.0],[14.0,-10.0],[-7.0,-10.0]]
CLOSET_POINTS = [[-7.0,-12.0],[-7.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[14.0,4.0],[14.0,-12.0],[-7.0,-12.0]]
CLOSET_POINTS = [[-7.0,-10.0],[-7.0,4.0],[1.5,4.0],[1.5,8.0],[5.5,8.0],[5.5,4.0],[14.0,4.0],[14.0,-10.0],[-7.0,-10.0]]
#CLOSET_POINTS = [[-7.0,-10.0],[-7.0,4.0],[1.5,4.0],[5.5,4.0],[14.0,4.0],[14.0,-10.0],[-7.0,-10.0]]
class OpenRAVEBody(object):
def __init__(self, env, name, geom):
self.name = name
self._env = env
self._geom = geom
if isinstance(geom, Robot):
self._add_robot(geom)
elif isinstance(geom, XMLItem):
self._add_xml_item(geom)
elif isinstance(geom, Item):
self._add_item(geom)
else:
raise ValueError(f"Geometry not supported for {geom} for OpenRAVEBody")
# self.set_transparency(0.5)
def delete(self):
P.removeCollisionShape(self.body_id)
def isrobot(self):
return isinstance(self._geom, Robot)
def set_transparency(self, transparency):
visual_infos = P.getVisualShapeData(self.body_id)
for info in visual_infos:
link_index = info[1]
link_rgba = info[7]
P.changeVisualShape(self.body_id, link_index, rgbaColor=list(link_rgba[:3])+[transparency])
def _add_robot(self, geom):
if not geom.is_initialized():
geom.setup(None)
self.env_body = geom.id
self.body_id = geom.id
def _add_xml_item(self, geom):
if not geom.is_initialized():
geom.setup(None)
self.env_body = geom.id
self.body_id = geom.id
def _add_item(self, geom):
try:
fun_name = "self._add_{}".format(geom._type)
eval(fun_name)(geom)
except Exception as e:
print('Could not add', geom._type, e)
raise e
#self._add_obj(geom)
def _add_circle(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.col_body_id = P.createCollisionShape(shapeType=P.GEOM_CYLINDER, radius=geom.radius, height=2)
self.body_id = P.createMultiBody(1, self.col_body_id)
def _add_can(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.col_body_id = P.createCollisionShape(shapeType=P.GEOM_CYLINDER, radius=geom.radius, height=geom.height)
self.body_id = P.createMultiBody(1, self.col_body_id)
#def _add_obstacle(self, geom):
# obstacles = np.matrix('-0.576036866359447, 0.918128654970760, 1;\
# -0.806451612903226,-1.07017543859649, 1;\
# 1.01843317972350,-0.988304093567252, 1;\
# 0.640552995391705,0.906432748538011, 1;\
# -0.576036866359447, 0.918128654970760, -1;\
# -0.806451612903226,-1.07017543859649, -1;\
# 1.01843317972350,-0.988304093567252, -1;\
# 0.640552995391705,0.906432748538011, -1')
# body = RaveCreateKinBody(self._env, '')
# vertices = np.array(obstacles)
# indices = np.array([[0, 1, 2], [2, 3, 0], [4, 5, 6], [6, 7, 4], [0, 4, 5],
# [0, 1, 5], [1, 2, 5], [5, 6, 2], [2, 3, 6], [6, 7, 3],
# [0, 3, 7], [0, 4, 7]])
# body.InitFromTrimesh(trimesh=TriMesh(vertices, indices), draw=True)
# body.SetName(self.name)
# for link in body.GetLinks():
# for geom in link.GetGeometries():
# geom.SetDiffuseColor((.9, .9, .9))
# self.env_body = body
# self._env.AddKinBody(body)
def _add_box(self, geom):
self.col_body_id = P.createCollisionShape(shapeType=P.GEOM_BOX, halfExtents=geom.dim)
self.body_id = P.createMultiBody(1, self.col_body_id)
def _add_sphere(self, geom):
self.col_body_id = P.createCollisionShape(shapeType=P.GEOM_SPHERE, radius=geom.radius)
self.body_id = P.createMultiBody(1, self.col_body_id)
def _add_door(self, geom):
self.body_id, self.col_body_id = OpenRAVEBody.create_door(self._env, geom.length)
def _add_wall(self, geom):
self.body_id = OpenRAVEBody.create_wall(self._env, geom.wall_type)
#def _add_obj(self, geom):
# self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
# self.env_body.SetName(self.name)
# self._env.Add(self.env_body)
#def _add_table(self, geom):
# self.env_body = OpenRAVEBody.create_table(self._env, geom)
# self.env_body.SetName(self.name)
# self._env.Add(self.env_body)
#def _add_basket(self, geom):
# self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
# self.env_body.SetName(self.name)
# self._env.Add(self.env_body)
def set_pose(self, base_pose, rotation = [0, 0, 0]):
trans = None
if np.any(np.isnan(base_pose)) or np.any(np.isnan(rotation)):
return
if hasattr(self._geom, 'jnt_names') and 'pose' in self._geom.jnt_names:
dof_map = {'pose': base_pose}
return self.set_dof(dof_map)
if isinstance(self._geom, Baxter):
pos = np.r_[base_pose[:2], 0]
quat = T.euler_to_quaternion([0, 0, base_pose[2]], order='xyzw')
elif len(base_pose) == 2:
base_pose = np.array(base_pose).flatten()
pos = np.concatenate([base_pose, [0]]).flatten()
if len(rotation) == 1:
rotation = [0., 0., rotation[0]]
# quat = [0, 0, 0, 1]
quat = T.euler_to_quaternion(rotation, order='xyzw')
else:
pos = base_pose
quat = T.euler_to_quaternion(rotation, order='xyzw')
P.resetBasePositionAndOrientation(self.body_id, pos, quat)
def set_dof(self, dof_value_map, debug=False):
"""
dof_value_map: A dict that maps robot attribute name to a list of corresponding values
"""
# make sure only sets dof for robot
# assert isinstance(self._geom, Robot)
#if not isinstance(self._geom, Robot): return
if not hasattr(self._geom, 'dof_map'): return
for key in dof_value_map:
if key not in self._geom.dof_map:
if debug: print('Cannot set dof for', key)
continue
if type(self._geom.dof_map[key]) is int:
P.resetJointState(self.body_id, self._geom.dof_map[key], dof_value_map[key])
else:
for i, jnt_ind in enumerate(self._geom.dof_map[key]):
if type(dof_value_map[key]) is int:
val = dof_value_map[key]
else:
ind = min(i, len(dof_value_map[key])-1)
val = dof_value_map[key][ind]
P.resetJointState(self.body_id, jnt_ind, val)
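# Example (illustrative): for a geometry whose dof_map exposes joints named
# "arm" and "gripper", a call might look like
#   body.set_dof({"arm": [0.0, -0.5, 0.3, 1.2, 0.0, 0.7, 0.0], "gripper": 0.02})
# The joint names and the expected value lengths depend entirely on the Robot
# geometry being wrapped.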
def _set_active_dof_inds(self, inds = None):
"""
Set the active dof indices to the ones we are interested in.
This function is implemented to simplify jacobian calculation in the CollisionPredicate.
inds: Optional list of indices specifying the dofs we are interested in
"""
pass
#@staticmethod
#def create_cylinder(env, body_name, t, dims, color=[0, 1, 1]):
# infocylinder = OpenRAVEBody.create_body_info(GeometryType.Cylinder, dims, color)
# if type(env) != Environment:
# print("Environment object is not valid")
# cylinder = RaveCreateKinBody(env, '')
# cylinder.InitFromGeometries([infocylinder])
# cylinder.SetName(body_name)
# cylinder.SetTransform(t)
# return cylinder
#@staticmethod
#def create_box(env, name, transform, dims, color=[0,0,1]):
# infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, dims, color, 0, True)
# box = RaveCreateKinBody(env,'')
# box.InitFromGeometries([infobox])
# box.SetName(name)
# box.SetTransform(transform)
# return box
#@staticmethod
#def create_sphere(env, name, transform, dims, color=[0,0,1]):
# infobox = OpenRAVEBody.create_body_info(GeometryType.Sphere, dims, color)
# sphere = RaveCreateKinBody(env,'')
# sphere.InitFromGeometries([infobox])
# sphere.SetName(name)
# sphere.SetTransform(transform)
# return sphere
#@staticmethod
#def create_body_info(body_type, dims, color, transparency = 0.8, visible = True):
# infobox = KinBody.Link.GeometryInfo()
# infobox._type = body_type
# infobox._vGeomData = dims
# infobox._bVisible = True
# infobox._fTransparency = transparency
# infobox._vDiffuseColor = color
# return infobox
@staticmethod
def create_door(env, door_len):
from opentamp.core.util_classes.namo_grip_predicates import HANDLE_OFFSET
door_color = [0.5, 0.2, 0.1]
box_infos = []
cols = [P.createCollisionShape(shapeType=P.GEOM_CYLINDER, radius=0.05, height=0.1),
P.createCollisionShape(shapeType=P.GEOM_BOX, halfExtents=[door_len/2.-0.1, 0.1, 0.4]),
P.createCollisionShape(shapeType=P.GEOM_CYLINDER, radius=0.3, height=0.4),]
link_pos = [(0, 0, 0), (door_len/2., 0., 0.), (door_len/2., -HANDLE_OFFSET, 0.)]
door = P.createMultiBody(basePosition=[0,0,0],
linkMasses=[1 for _ in cols],
linkCollisionShapeIndices=[ind for ind in cols],
linkVisualShapeIndices=[-1 for _ in cols],
linkPositions=[pos for pos in link_pos],
linkOrientations=[[0,0,0,1] for _ in cols],
linkInertialFramePositions=[[0,0,0] for _ in cols],
linkInertialFrameOrientations=[[0,0,0,1] for _ in cols],
linkParentIndices=[0, 1, 2],
linkJointTypes=[P.JOINT_REVOLUTE]+[P.JOINT_FIXED for _ in cols[1:]],
linkJointAxis=[[0,0,1] for _ in cols]
)
return door, cols
@staticmethod
def create_wall(env, wall_type):
wall_color = [0.5, 0.2, 0.1]
box_infos = []
if wall_type == 'closet':
# wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
wall_endpoints = CLOSET_POINTS
elif wall_type == 'three_room':
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[-1.5,4.0],
[-1.5,2.0],[-1.5,4.0],[6.0,4.0],
[6.0,2.0],[6.0,4.0],[13.0,4.0],
[13.0,-8.0],[6.0,-8.0],[6.0,-1.5],
[6.0,-8.0],[-1.5,-8.0],[-1.5,-1.5],
[-1.5,-8.0], [-6.0,-8.0]]
else:
raise NotImplementedError
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims = [dim_x, dim_y, 1]
box_infos.append((dims, transform[:3,3]))
cols = [P.createCollisionShape(shapeType=P.GEOM_BOX, halfExtents=h) for h, t in box_infos]
wall = P.createMultiBody(basePosition=[0,0,0],
linkMasses=[1 for _ in cols],
linkCollisionShapeIndices=[ind for ind in cols],
linkVisualShapeIndices=[-1 for _ in cols],
linkPositions=[t[:3] for _, t in box_infos],
linkOrientations=[[0,0,0,1] for _, t in box_infos],
linkInertialFramePositions=[[0,0,0] for _ in cols],
linkInertialFrameOrientations=[[0,0,0,1] for _, t in box_infos],
linkParentIndices=[0 for _ in cols],
linkJointTypes=[P.JOINT_FIXED for _ in cols],
linkJointAxis=[[0,0,1] for _ in cols]
)
return wall
@staticmethod
def get_wall_dims(wall_type='closet'):
if wall_type == 'closet':
wall_endpoints = CLOSET_POINTS
elif wall_type == 'three_room':
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[-1.5,4.0],
[-1.5,2.0],[-1.5,4.0],[6.0,4.0],
[6.0,2.0],[6.0,4.0],[13.0,4.0],
[13.0,-8.0],[6.0,-8.0],[6.0,-1.5],
[6.0,-8.0],[-1.5,-8.0],[-1.5,-1.5],
[-1.5,-8.0], [-6.0,-8.0]]
else:
raise NotImplementedError
dims = []
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplementedError('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims.append(([dim_x, dim_y, 1], transform))
return dims
#@staticmethod
#def create_basket_col(env):
# long_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
# long_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
# short_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
# short_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
# bottom_info = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.015,.2], [0, 0.75, 1])
# long_info1._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,0.208],[0,0,0.055])
# long_info2._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,-0.208],[0,0,-0.055])
# short_info1._t = OpenRAVEBody.transform_from_obj_pose([0.309,-0.118,0],[-0.055,0,0])
# short_info2._t = OpenRAVEBody.transform_from_obj_pose([-0.309,-0.118,0],[0.055,0,0])
# bottom_info._t = OpenRAVEBody.transform_from_obj_pose([0,-0.25,0],[0,0,0])
# basket = RaveCreateRobot(env, '')
# basket.InitFromGeometries([long_info1, long_info2, short_info1, short_info2, bottom_info])
# return basket
#@staticmethod
#def create_table(env, geom):
# thickness = geom.thickness
# leg_height = geom.leg_height
# back = geom.back
# dim1, dim2 = geom.table_dim
# legdim1, legdim2 = geom.leg_dim
# table_color = [0.5, 0.2, 0.1]
# component_type = KinBody.Link.GeomType.Box
# tabletop = OpenRAVEBody.create_body_info(component_type, [dim1/2, dim2/2, thickness/2], table_color)
# leg1 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
# leg1._t[0, 3] = dim1/2 - legdim1/2
# leg1._t[1, 3] = dim2/2 - legdim2/2
# leg1._t[2, 3] = -leg_height/2 - thickness/2
# leg2 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
# leg2._t[0, 3] = dim1/2 - legdim1/2
# leg2._t[1, 3] = -dim2/2 + legdim2/2
# leg2._t[2, 3] = -leg_height/2 - thickness/2
# leg3 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
# leg3._t[0, 3] = -dim1/2 + legdim1/2
# leg3._t[1, 3] = dim2/2 - legdim2/2
# leg3._t[2, 3] = -leg_height/2 - thickness/2
# leg4 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
# leg4._t[0, 3] = -dim1/2 + legdim1/2
# leg4._t[1, 3] = -dim2/2 + legdim2/2
# leg4._t[2, 3] = -leg_height/2 - thickness/2
# if back:
# back_plate = OpenRAVEBody.create_body_info(component_type, [legdim1/10, dim2/2, leg_height-thickness/2], table_color)
# back_plate._t[0, 3] = dim1/2 - legdim1/10
# back_plate._t[1, 3] = 0
# back_plate._t[2, 3] = -leg_height/2 - thickness/4
# table = RaveCreateRobot(env, '')
# if not back:
# table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4])
# else:
# table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4, back_plate])
# return table
@staticmethod
def base_pose_2D_to_mat(pose):
# x, y = pose
assert len(pose) == 2
x = pose[0]
y = pose[1]
pos = [x, y, 0]
rot = 0
matrix = T.pose2mat((pos, [0, 0, 0, 1]))  # identity quaternion in xyzw order, matching base_pose_3D_to_mat
return matrix
@staticmethod
def base_pose_3D_to_mat(pose):
# x, y, z = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
z = pose[2]
pos = [x, y, z]
rot = 0
matrix = T.pose2mat((pos, [0, 0, 0, 1]))
return matrix
@staticmethod
def mat_to_base_pose_2D(mat):
return T.mat2pose(mat)[0][:2]
@staticmethod
def base_pose_to_mat(pose):
# x, y, rot = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
rot = pose[2]
pos = [x, y, 0]
quat = T.euler_to_quaternion([0, 0, rot], order='xyzw')
matrix = T.pose2mat((pos, quat))
return matrix
# @staticmethod
# def angle_pose_to_mat(pose):
# assert len(pose) == 1
# if USE_OPENRAVE:
# q = quatFromAxisAngle((0, 0, pose)).tolist()
# matrix = matrixFromPose(q + pos)
# else:
# quat = T.euler_to_quaternion([0, 0, pose], order='xyzw')
# matrix = T.pose2mat((pos, quat))
# return matrix
@staticmethod
def mat_to_base_pose(mat):
# poseFromMatrix/axisAngleFromRotationMatrix were OpenRAVE helpers; use the
# transform_utils equivalents that the rest of this module relies on.
pos, quat = T.mat2pose(mat)
rot = T.quaternion_to_euler(quat, order='xyzw')[2]
return np.array([pos[0], pos[1], rot])
@staticmethod
def obj_pose_from_transform(transform):
trans = transform[:3,3]
rot_matrix = transform[:3,:3]
yaw, pitch, roll = OpenRAVEBody._ypr_from_rot_matrix(rot_matrix)
return np.array((trans[0], trans[1], trans[2], yaw, pitch, roll))
@staticmethod
def transform_from_obj_pose(pose, rotation = np.array([0,0,0])):
x, y, z = pose
if len(rotation) == 4:
rotation = T.quaternion_to_euler(rotation, order='xyzw')
rotation = [rotation[2], rotation[1], rotation[0]]
Rz, Ry, Rx = OpenRAVEBody._axis_rot_matrices(pose, rotation)
rot_mat = np.dot(Rz, np.dot(Ry, Rx))
matrix = np.eye(4)
matrix[:3,:3] = rot_mat
matrix[:3,3] = [x,y,z]
return matrix
@staticmethod
def _axis_rot_matrices(pose, rotation):
x, y, z = pose
alpha, beta, gamma = rotation
Rz_2d = np.array([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
Ry_2d = np.array([[cos(beta), sin(beta)], [-sin(beta), cos(beta)]])
Rx_2d = np.array([[cos(gamma), -sin(gamma)], [sin(gamma), cos(gamma)]])
I = np.eye(3)
Rz = I.copy()
Rz[:2,:2] = Rz_2d
Ry = I.copy()
Ry[[[0],[2]],[0,2]] = Ry_2d
Rx = I.copy()
Rx[1:3,1:3] = Rx_2d
return Rz, Ry, Rx
@staticmethod
def _ypr_from_rot_matrix(r):
# alpha
yaw = atan2(r[1,0], r[0,0])
# beta
pitch = atan2(-r[2,0],np.sqrt(r[2,1]**2+r[2,2]**2))
# gamma
roll = atan2(r[2,1], r[2,2])
return (yaw, pitch, roll)
@staticmethod
def quat_from_v1_to_v2(v1, v2):
v1, v2 = np.array(v1), np.array(v2)
xyz = np.cross(v1, v2)
len1 = np.sum(v1**2)
len2 = np.sum(v2**2)
w = np.sqrt(len1 * len2) + np.dot(v1, v2)
quat = np.concatenate([xyz, [w]])
if np.all(np.abs(quat) < 1e-5):
v1 = v1 / np.linalg.norm(v1)
mid_axis = [1, 0, 0] if np.abs(np.dot([1,0,0], v1)) < 1-1e-3 else [0, 1, 0]
quat1 = OpenRAVEBody.quat_from_v1_to_v2(v1, mid_axis)
quat2 = OpenRAVEBody.quat_from_v1_to_v2(mid_axis, v2)
mat1 = T.quat2mat(quat1)
mat2 = T.quat2mat(quat2)
quat = T.mat2quat(mat1.dot(mat2))
quat /= np.linalg.norm(quat)
return quat
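# Example (illustrative, pure numpy): rotating the x-axis onto the y-axis
#   q = OpenRAVEBody.quat_from_v1_to_v2([1, 0, 0], [0, 1, 0])
#   # q is in (x, y, z, w) order, here approximately [0, 0, 0.7071, 0.7071],
#   # i.e. a 90 degree rotation about z.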
#@staticmethod
#def get_ik_transform(pos, rot, right_arm = True):
# trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
# # Openravepy flip the rotation axis by 90 degree, thus we need to change it back
# if right_arm:
# quat = T.euler_to_quaternion([0, np.pi/2, 0], order='xyzw')
# else:
# quat = T.euler_to_quaternion([0, -np.pi/2, 0], order='xyzw')
# rot_mat = T.pose2mat([[0, 0, 0], quat])
# trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
# trans[:3, :3] = trans_mat
# return trans
def get_link_pose(self, link_id, euler=True):
info = P.getLinkState(self.body_id, link_id)
pos, orn = info[0], info[1]
if euler:
orn = T.quaternion_to_euler(orn, order='xyzw')
return pos, orn
def current_pose(self, euler=True):
pos, orn = P.getBasePositionAndOrientation(self.body_id)
if euler:
orn = T.quaternion_to_euler(orn, order='xyzw')
return pos, orn
def set_from_param(self, param, t):
if param.is_symbol(): t = 0
pos = param.pose[:,t] if not param.is_symbol() else param.value[:,0]
if 'Robot' in param.get_type(True) or 'RobotPose' in param.get_type(True):
dof_map = {}
geom = param.openrave_body._geom
for arm in geom.arms:
dof_map[arm] = getattr(param, arm)[:,t]
for gripper in geom.grippers:
dof_map[gripper] = getattr(param, gripper)[:,t]
self.set_dof(dof_map)
if hasattr(param, 'rotation'):
self.set_pose(pos, param.rotation[:,t])
else:
self.set_pose(pos)
else:
self.set_pose(pos, param.rotation[:,t])
def get_ik_from_pose(self, pos, rot, manip_name, use6d=True, multiple=0, maxIter=1024, bnds=None):
quat = rot if (rot is None or len(rot) == 4) else T.euler_to_quaternion(rot, order='xyzw')
pos = np.array(pos).tolist()
quat = np.array(quat).tolist()
if bnds is None:
lb, ub = self._geom.get_arm_bnds()
else:
true_lb, true_ub = self._geom.get_arm_bnds()
lb, ub = bnds
if len(lb) < len(true_lb):
lb = np.r_[lb, -10*np.ones(len(true_lb) - len(lb))]
if len(ub) < len(true_ub):
ub = np.r_[ub, 10*np.ones(len(true_ub) - len(ub))]
lb = np.maximum(lb, true_lb).tolist()
ub = np.minimum(ub, true_ub).tolist()
ranges = (np.array(ub) - np.array(lb)).tolist()
jnt_ids = sorted(self._geom.get_free_inds())
jnts = P.getJointStates(self.body_id, jnt_ids)
rest_poses = [j[0] for j in jnts]
cur_jnts = rest_poses
manip_id = self._geom.get_ee_link(manip_name)
damp = (0.1 * np.ones(len(jnt_ids))).tolist()
joint_pos = P.calculateInverseKinematics(self.body_id,
manip_id,
pos,
quat,
lowerLimits=lb,
upperLimits=ub,
jointRanges=ranges,
restPoses=rest_poses,
jointDamping=damp,
maxNumIterations=maxIter)
inds = list(self._geom.get_free_inds(manip_name))
joint_pos = np.array(joint_pos)[inds].tolist()
lb, ub = self._geom.get_joint_limits(manip_name)
joint_pos = np.maximum(np.minimum(joint_pos, ub), lb)
if not multiple: return joint_pos
poses = [joint_pos]
for _ in range(multiple):
rest_poses = (np.array(cur_jnts) + 5 * (np.random.uniform(size=len(lb)) - 0.5) * ranges).tolist()
joint_pos = P.calculateInverseKinematics(self.body_id,
manip_id,
pos,
quat,
lb,
ub,
ranges,
rest_poses,
maxNumIterations=maxIter)
joint_pos = np.array(joint_pos)[inds].tolist()
poses.append(joint_pos)
return poses
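# Example (illustrative; the manipulator name must be one defined by the Robot
# geometry's dof_map / end-effector links, e.g. "right" on a Baxter-style arm):
#   jnts = body.get_ik_from_pose(pos=[0.6, -0.2, 0.9],
#                                rot=[0.0, np.pi / 2, 0.0],
#                                manip_name="right")
#   body.set_dof({"right": jnts})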
def fwd_kinematics(self, manip_name, dof_map=None, mat_result=False):
if dof_map is not None:
self.set_dof(dof_map)
ee_link = self._geom.get_ee_link(manip_name)
link_state = P.getLinkState(self.body_id, ee_link)
pos = link_state[0]
quat = link_state[1]
if mat_result:
return OpenRAVEBody.transform_from_obj_pose(pos, quat)
return {'pos': pos, 'quat': quat}
def param_fwd_kinematics(self, param, manip_names, t, mat_result=False):
if not isinstance(self._geom, Robot): return
attrs = list(param._attr_types.keys())
attr_vals = {attr: getattr(param, attr)[:, t] for attr in attrs if attr in self._geom.dof_map}
param.openrave_body.set_dof(attr_vals)
result = {}
for manip_name in manip_names:
result[manip_name] = self.fwd_kinematics(manip_name, mat_result=mat_result)
return result
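# ---------------------------------------------------------------------------
# Quick self-check of the pure-math pose helpers (a sketch; only exercised when
# this file is run directly, so importing the module is unaffected):
if __name__ == "__main__":
    _pose = [0.1, 0.2, 0.3]
    _rot = [0.4, 0.5, 0.6]  # yaw, pitch, roll in radians
    _mat = OpenRAVEBody.transform_from_obj_pose(_pose, _rot)
    # Round trip should recover roughly [0.1 0.2 0.3 0.4 0.5 0.6]
    print(OpenRAVEBody.obj_pose_from_transform(_mat))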
| 42.183976
| 130
| 0.547869
|
56442e8f850fd9c7f136e29c63d3d6d34743e306
| 16,963
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEFabricLogoLsAcc_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEFabricLogoLsAcc_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fCoEFabricLogoLsAcc_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEFabricLogoLsAcc(Base):
__slots__ = ()
_SDM_NAME = 'fCoEFabricLogoLsAcc'
_SDM_ATT_MAP = {
'FcoeHeaderVersion': 'fCoEFabricLogoLsAcc.header.fcoeHeader.version-1',
'FcoeHeaderReserved': 'fCoEFabricLogoLsAcc.header.fcoeHeader.reserved-2',
'FcoeHeaderESOF': 'fCoEFabricLogoLsAcc.header.fcoeHeader.eSOF-3',
'ExtendedLinkServicesExtendedLinkServiceInfo': 'fCoEFabricLogoLsAcc.header.fcHeader.RCtl.extendedLinkServices.extendedLinkServiceInfo-4',
'FcHeaderDId': 'fCoEFabricLogoLsAcc.header.fcHeader.DId-5',
'FcHeaderCsCtlPriority': 'fCoEFabricLogoLsAcc.header.fcHeader.CsCtlPriority-6',
'FcHeaderSId': 'fCoEFabricLogoLsAcc.header.fcHeader.SId-7',
'FcHeaderType': 'fCoEFabricLogoLsAcc.header.fcHeader.Type-8',
'FCtlExchangeContext': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.exchangeContext-9',
'FCtlSequenceContext': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.sequenceContext-10',
'FCtlFirstSequence': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.firstSequence-11',
'FCtlLastSequence': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.lastSequence-12',
'FCtlEndSequence': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.endSequence-13',
'FCtlEndConnection': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.endConnection-14',
'FCtlCsCtlPriority': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.csCtlPriority-15',
'FCtlSequenceInitiative': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.sequenceInitiative-16',
'FCtlFcXidReassigned': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.fcXidReassigned-17',
'FCtlFcInvalidateXid': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.fcInvalidateXid-18',
'FCtlAckForm': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.ackForm-19',
'FCtlFcDataCompression': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.fcDataCompression-20',
'FCtlFcDataEncryption': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.fcDataEncryption-21',
'FCtlRetransmittedSequence': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.retransmittedSequence-22',
'FCtlUnidirectionalTransmit': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.unidirectionalTransmit-23',
'FCtlContinueSeqCondition': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.continueSeqCondition-24',
'FCtlAbortSeqCondition': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.abortSeqCondition-25',
'FCtlRelativeOffsetPresent': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.relativeOffsetPresent-26',
'FCtlExchangeReassembly': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.exchangeReassembly-27',
'FCtlFillBytes': 'fCoEFabricLogoLsAcc.header.fcHeader.FCtl.fCtl.fillBytes-28',
'FcHeaderSeqId': 'fCoEFabricLogoLsAcc.header.fcHeader.SeqId-29',
'FcHeaderDfCtl': 'fCoEFabricLogoLsAcc.header.fcHeader.DfCtl-30',
'FcHeaderSeqCnt': 'fCoEFabricLogoLsAcc.header.fcHeader.SeqCnt-31',
'FcHeaderOxId': 'fCoEFabricLogoLsAcc.header.fcHeader.OxId-32',
'FcHeaderRxId': 'fCoEFabricLogoLsAcc.header.fcHeader.RxId-33',
'FcHeaderParameter': 'fCoEFabricLogoLsAcc.header.fcHeader.Parameter-34',
'FcElsCommandCodeFcElsCommandCodeLsAcc': 'fCoEFabricLogoLsAcc.header.FcEls.FcElsAcceptReject.FcElsCommandCode.FcElsCommandCodeLsAcc-35',
'FcElsAcceptRejectFcElsAcceptRejectReserved': 'fCoEFabricLogoLsAcc.header.FcEls.FcElsAcceptReject.FcElsAcceptRejectReserved-36',
}
def __init__(self, parent, list_op=False):
super(FCoEFabricLogoLsAcc, self).__init__(parent, list_op)
@property
def FcoeHeaderVersion(self):
"""
Display Name: Version
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
@property
def FcoeHeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
@property
def FcoeHeaderESOF(self):
"""
Display Name: E-SOF
Default Value: 54
Value Format: decimal
Available enum values: SOFf - Fabric, 40, SOFi4 - Initiate Class 4, 41, SOFi2 - Initiate Class 2, 45, SOFi3 - Initiate Class 3, 46, SOFn4 - Normal Class 4, 49, SOFn2 - Normal Class 2, 53, SOFn3 - Normal Class 3, 54, SOFc4 - Connect Class 4, 57, SOFn1 - Normal Class 1 or 6, 250
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
@property
def ExtendedLinkServicesExtendedLinkServiceInfo(self):
"""
Display Name: Information
Default Value: 35
Value Format: decimal
Available enum values: Solicited Data, 33, Request, 34, Reply, 35
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesExtendedLinkServiceInfo']))
@property
def FcHeaderDId(self):
"""
Display Name: D_ID
Default Value: 0x000001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDId']))
@property
def FcHeaderCsCtlPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCtlPriority']))
@property
def FcHeaderSId(self):
"""
Display Name: S_ID
Default Value: 0xFFFFFE
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSId']))
@property
def FcHeaderType(self):
"""
Display Name: Type
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
@property
def FCtlExchangeContext(self):
"""
Display Name: Exchange Context
Default Value: 0
Value Format: decimal
Available enum values: Originator, 0, Recipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlExchangeContext']))
@property
def FCtlSequenceContext(self):
"""
Display Name: Sequence Context
Default Value: 0
Value Format: decimal
Available enum values: Initiator, 0, Recipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlSequenceContext']))
@property
def FCtlFirstSequence(self):
"""
Display Name: First Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, First, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFirstSequence']))
@property
def FCtlLastSequence(self):
"""
Display Name: Last Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlLastSequence']))
@property
def FCtlEndSequence(self):
"""
Display Name: End Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlEndSequence']))
@property
def FCtlEndConnection(self):
"""
Display Name: End Connection
Default Value: 0
Value Format: decimal
Available enum values: Alive, 0, Pending, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlEndConnection']))
@property
def FCtlCsCtlPriority(self):
"""
Display Name: CS_CTL/Priority Enable
Default Value: 0
Value Format: decimal
Available enum values: CS_CTL, 0, Priority, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlCsCtlPriority']))
@property
def FCtlSequenceInitiative(self):
"""
Display Name: Sequence Initiative
Default Value: 0
Value Format: decimal
Available enum values: Hold, 0, Transfer, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlSequenceInitiative']))
@property
def FCtlFcXidReassigned(self):
"""
Display Name: FC XID Reassigned
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcXidReassigned']))
@property
def FCtlFcInvalidateXid(self):
"""
Display Name: FC Invalidate XID
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcInvalidateXid']))
@property
def FCtlAckForm(self):
"""
Display Name: ACK_Form
Default Value: 0
Value Format: decimal
Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlAckForm']))
@property
def FCtlFcDataCompression(self):
"""
Display Name: FC Data Compression
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcDataCompression']))
@property
def FCtlFcDataEncryption(self):
"""
Display Name: FC Data Encryption
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcDataEncryption']))
@property
def FCtlRetransmittedSequence(self):
"""
Display Name: Retransmitted Sequence
Default Value: 0
Value Format: decimal
Available enum values: Original, 0, Retransmission, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlRetransmittedSequence']))
@property
def FCtlUnidirectionalTransmit(self):
"""
Display Name: Unidirectional Transmit
Default Value: 0
Value Format: decimal
Available enum values: Bi-directional, 0, Unidirectional, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlUnidirectionalTransmit']))
@property
def FCtlContinueSeqCondition(self):
"""
Display Name: Continue Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: No information, 0, Sequence to follow-immediately, 1, Sequence to follow-soon, 2, Sequence to follow-delayed, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlContinueSeqCondition']))
@property
def FCtlAbortSeqCondition(self):
"""
Display Name: Abort Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlAbortSeqCondition']))
@property
def FCtlRelativeOffsetPresent(self):
"""
Display Name: Relative offset present
Default Value: 0
Value Format: decimal
Available enum values: Parameter field defined, 0, Relative offset, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlRelativeOffsetPresent']))
@property
def FCtlExchangeReassembly(self):
"""
Display Name: Exchange Reassembly
Default Value: 0
Value Format: decimal
Available enum values: off, 0, on, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlExchangeReassembly']))
@property
def FCtlFillBytes(self):
"""
Display Name: Fill Bytes
Default Value: 0
Value Format: decimal
Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFillBytes']))
@property
def FcHeaderSeqId(self):
"""
Display Name: SEQ_ID
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqId']))
@property
def FcHeaderDfCtl(self):
"""
Display Name: DF_CTL
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCtl']))
@property
def FcHeaderSeqCnt(self):
"""
Display Name: SEQ_CNT
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCnt']))
@property
def FcHeaderOxId(self):
"""
Display Name: OX_ID
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxId']))
@property
def FcHeaderRxId(self):
"""
Display Name: RX_ID
Default Value: 0xFFFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxId']))
@property
def FcHeaderParameter(self):
"""
Display Name: Parameter
Default Value: 0x00000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
@property
def FcElsCommandCodeFcElsCommandCodeLsAcc(self):
"""
Display Name: LS_ACC
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcElsCommandCodeFcElsCommandCodeLsAcc']))
@property
def FcElsAcceptRejectFcElsAcceptRejectReserved(self):
"""
Display Name: Reserved
Default Value: 0x000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcElsAcceptRejectFcElsAcceptRejectReserved']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
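# Example (illustrative sketch; "config_element" is a placeholder for an
# existing traffic ConfigElement and the exact lookup call may differ by
# ixnetwork_restpy version -- each property on this class returns a Multivalue,
# whose setter methods are used instead of plain assignment):
#   fcoe_hdr = <lookup of the fCoEFabricLogoLsAcc stack on config_element>
#   fcoe_hdr.FcHeaderSId.Single('0xFFFFFE')
#   fcoe_hdr.FcHeaderDId.Single('0x000001')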
| 39.085253
| 285
| 0.677887
|
08b6673fc0e86d906909d187766d0771c077bb6e
| 4,834
|
py
|
Python
|
docs/conf.py
|
nmdickson/ssptools
|
52a186fff7d7080d8339897a30742f20566b8a1f
|
[
"MIT"
] | 1
|
2020-10-12T14:56:03.000Z
|
2020-10-12T14:56:03.000Z
|
docs/conf.py
|
nmdickson/ssptools
|
52a186fff7d7080d8339897a30742f20566b8a1f
|
[
"MIT"
] | 4
|
2019-05-02T09:36:11.000Z
|
2021-01-14T20:13:11.000Z
|
docs/conf.py
|
nmdickson/ssptools
|
52a186fff7d7080d8339897a30742f20566b8a1f
|
[
"MIT"
] | 2
|
2020-10-17T20:34:27.000Z
|
2021-01-27T03:15:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ssptools documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import ssptools
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SSPTools'
copyright = u"2018, Eduardo Balbinot"
author = u"Eduardo Balbinot"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ssptools.__version__
# The full version, including alpha/beta/rc tags.
release = ssptools.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ssptoolsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ssptools.tex',
u'SSPTools Documentation',
u'Eduardo Balbinot', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ssptools',
u'SSPTools Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ssptools',
u'SSPTools Documentation',
author,
'ssptools',
'One line description of project.',
'Miscellaneous'),
]
| 29.47561
| 77
| 0.684733
|
49a654c42ea81ba303e43c537cde6e6d8568f199
| 29,683
|
py
|
Python
|
watertap/core/tests/test_zero_order_unit.py
|
bknueven/proteuslib
|
1d00f48b3bc4b868c45f44c38acf8203a910931a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/core/tests/test_zero_order_unit.py
|
bknueven/proteuslib
|
1d00f48b3bc4b868c45f44c38acf8203a910931a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/core/tests/test_zero_order_unit.py
|
bknueven/proteuslib
|
1d00f48b3bc4b868c45f44c38acf8203a910931a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for general zero-order property package
"""
import pytest
from idaes.core import declare_process_block_class, FlowsheetBlock
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core.util import get_solver
import idaes.core.util.scaling as iscale
from idaes.core.util.exceptions import ConfigurationError
from pyomo.environ import (Block,
ConcreteModel,
Constraint,
SolverStatus,
TerminationCondition,
value,
Var)
from pyomo.network import Port
from pyomo.util.check_units import assert_units_consistent
from watertap.core.zero_order_unit import SITOBaseData
from watertap.core.zero_order_properties import \
WaterParameterBlock, WaterStateBlock
solver = get_solver()
class TestSITOConfigurationErrors:
@pytest.fixture
def model(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
m.fs.params.del_component(m.fs.params.phase_list)
m.fs.params.del_component(m.fs.params.solvent_set)
m.fs.params.del_component(m.fs.params.solute_set)
m.fs.params.del_component(m.fs.params.component_list)
return m
@declare_process_block_class("DerivedSITO0")
class DerivedSITOData0(SITOBaseData):
def build(self):
self._has_deltaP_outlet = True
self._has_deltaP_waste = True
super().build()
@pytest.mark.unit
def test_phase_list(self, model):
model.fs.params.phase_list = ["foo"]
with pytest.raises(ConfigurationError,
match="fs.unit configured with invalid property "
"package. Zero-order models only support property "
"packages with a single phase named 'Liq'."):
model.fs.unit = DerivedSITO0(
default={"property_package": model.fs.params})
@pytest.mark.unit
def test_no_solvent_set(self, model):
model.fs.params.phase_list = ["Liq"]
with pytest.raises(ConfigurationError,
match="fs.unit configured with invalid property "
"package. Zero-order models only support property "
"packages which include 'H2O' as the only Solvent."
):
model.fs.unit = DerivedSITO0(
default={"property_package": model.fs.params})
@pytest.mark.unit
def test_invalid_solvent_set(self, model):
model.fs.params.phase_list = ["Liq"]
model.fs.params.solvent_set = ["foo"]
with pytest.raises(ConfigurationError,
match="fs.unit configured with invalid property "
"package. Zero-order models only support property "
"packages which include 'H2O' as the only Solvent."
):
model.fs.unit = DerivedSITO0(
default={"property_package": model.fs.params})
@pytest.mark.unit
def test_no_solute_set(self, model):
model.fs.params.phase_list = ["Liq"]
model.fs.params.solvent_set = ["H2O"]
with pytest.raises(ConfigurationError,
match="fs.unit configured with invalid property "
"package. Zero-order models require property "
"packages to declare all dissolved species as "
"Solutes."):
model.fs.unit = DerivedSITO0(
default={"property_package": model.fs.params})
@pytest.mark.unit
def test_non_solvent_or_solute(self, model):
model.fs.params.phase_list = ["Liq"]
model.fs.params.solvent_set = ["H2O"]
model.fs.params.solute_set = ["A", "B", "C"]
model.fs.params.component_list = ["H2O", "A", "B", "C", "foo"]
with pytest.raises(ConfigurationError,
match="fs.unit configured with invalid property "
"package. Zero-order models only support `H2O` as "
"a solvent and all other species as Solutes."):
model.fs.unit = DerivedSITO0(
default={"property_package": model.fs.params})
@pytest.mark.unit
def test_no_has_deltaP_outlet():
@declare_process_block_class("DerivedSITO1")
class DerivedSITOData1(SITOBaseData):
def build(self):
self._has_deltaP_waste = True
super().build()
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
with pytest.raises(NotImplementedError,
match="fs.unit derived class has not been implemented "
"_has_deltaP_outlet."):
m.fs.unit = DerivedSITO1(
default={"property_package": m.fs.water_props})
@pytest.mark.unit
def test_no_has_deltaP_waste():
@declare_process_block_class("DerivedSITO2")
class DerivedSITOData2(SITOBaseData):
def build(self):
self._has_deltaP_outlet = True
super().build()
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
with pytest.raises(NotImplementedError,
match="fs.unit derived class has not been implemented "
"_has_deltaP_waste."):
m.fs.unit = DerivedSITO2(
default={"property_package": m.fs.water_props})
class TestPressureChange:
@declare_process_block_class("DerivedSITO3")
class DerivedSITOData3(SITOBaseData):
def build(self):
self._has_deltaP_outlet = True
self._has_deltaP_waste = True
super().build()
@pytest.fixture(scope="module")
def model(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
m.fs.unit = DerivedSITO3(
default={"property_package": m.fs.water_props})
m.fs.unit.inlet.flow_vol.fix(42)
m.fs.unit.inlet.conc_mass_comp[0, "A"].fix(10)
m.fs.unit.inlet.conc_mass_comp[0, "B"].fix(20)
m.fs.unit.inlet.conc_mass_comp[0, "C"].fix(30)
m.fs.unit.inlet.temperature.fix(303.15)
m.fs.unit.inlet.pressure.fix(1.5e5)
m.fs.unit.recovery_vol.fix(0.8)
m.fs.unit.removal_mass_solute[0, "A"].fix(0.1)
m.fs.unit.removal_mass_solute[0, "B"].fix(0.2)
m.fs.unit.removal_mass_solute[0, "C"].fix(0.3)
m.fs.unit.deltaP_outlet.fix(1000)
m.fs.unit.deltaP_waste.fix(2000)
return m
@pytest.mark.unit
def test_build(self, model):
assert isinstance(model.fs.unit.properties_in, WaterStateBlock)
assert isinstance(model.fs.unit.properties_out, WaterStateBlock)
assert isinstance(model.fs.unit.properties_waste, WaterStateBlock)
assert isinstance(model.fs.unit.inlet, Port)
assert isinstance(model.fs.unit.outlet, Port)
assert isinstance(model.fs.unit.waste, Port)
assert isinstance(model.fs.unit.recovery_vol, Var)
assert len(model.fs.unit.recovery_vol) == 1
assert isinstance(model.fs.unit.removal_mass_solute, Var)
assert len(model.fs.unit.removal_mass_solute) == 3
assert isinstance(model.fs.unit.deltaP_outlet, Var)
assert len(model.fs.unit.deltaP_outlet) == 1
assert isinstance(model.fs.unit.deltaP_waste, Var)
assert len(model.fs.unit.deltaP_waste) == 1
assert isinstance(model.fs.unit.water_recovery_equation, Constraint)
assert len(model.fs.unit.water_recovery_equation) == 1
assert isinstance(model.fs.unit.flow_balance, Constraint)
assert len(model.fs.unit.flow_balance) == 1
assert isinstance(model.fs.unit.solute_removal_equation, Constraint)
assert len(model.fs.unit.solute_removal_equation) == 3
assert isinstance(model.fs.unit.solute_outlet_equation, Constraint)
assert len(model.fs.unit.solute_outlet_equation) == 3
assert isinstance(model.fs.unit.outlet_pressure_constraint, Constraint)
assert len(model.fs.unit.outlet_pressure_constraint) == 1
assert isinstance(model.fs.unit.waste_pressure_constraint, Constraint)
assert len(model.fs.unit.waste_pressure_constraint) == 1
assert isinstance(model.fs.unit.outlet_temperature_equality,
Constraint)
assert len(model.fs.unit.outlet_temperature_equality) == 1
assert isinstance(model.fs.unit.waste_temperature_equality, Constraint)
assert len(model.fs.unit.waste_temperature_equality) == 1
@pytest.mark.unit
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model)
@pytest.mark.component
def test_scaling(self, model):
iscale.calculate_scaling_factors(model)
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.water_recovery_equation[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.flow_balance[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.waste_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_temperature_equality[0]) == 1e-2
        assert iscale.get_constraint_transform_applied_scaling_factor(
            model.fs.unit.waste_temperature_equality[0]) == 1e-2
@pytest.mark.component
def test_initialization(self, model):
initialization_tester(model)
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(33.6, rel=1e-5) ==
value(model.fs.unit.outlet.flow_vol[0]))
assert (pytest.approx(8.4, rel=1e-5) ==
value(model.fs.unit.waste.flow_vol[0]))
assert (pytest.approx(11.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "A"]))
assert (pytest.approx(5, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "A"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "B"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "B"]))
assert (pytest.approx(26.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "C"]))
assert (pytest.approx(45, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "C"]))
assert (pytest.approx(151000, rel=1e-5) ==
value(model.fs.unit.outlet.pressure[0]))
assert (pytest.approx(152000, rel=1e-5) ==
value(model.fs.unit.waste.pressure[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.outlet.temperature[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.waste.temperature[0]))
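    # Quick sanity check of the expected values above (explanatory note, not
    # part of the original test): with recovery_vol = 0.8 the outlet carries
    # 0.8 * 42 = 33.6 and the waste 8.4; for solute A, removal_mass_solute =
    # 0.1 sends 0.1 of the inlet solute mass (0.1 * 42 * 10 = 42) to the
    # waste, giving 42 / 8.4 = 5 there and (420 - 42) / 33.6 = 11.25 at the
    # outlet; the pressures are simply 1.5e5 + deltaP (151000 and 152000).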
@pytest.mark.component
def test_conservation(self, model):
assert abs(value(model.fs.unit.inlet.flow_vol[0] -
model.fs.unit.outlet.flow_vol[0] -
model.fs.unit.waste.flow_vol[0])) <= 1e-6
for j in model.fs.water_props.solute_set:
assert (abs(value(model.fs.unit.inlet.flow_vol[0] *
model.fs.unit.inlet.conc_mass_comp[0, j] -
model.fs.unit.outlet.flow_vol[0] *
model.fs.unit.outlet.conc_mass_comp[0, j] -
model.fs.unit.waste.flow_vol[0] *
model.fs.unit.waste.conc_mass_comp[0, j]))
<= 1e-6)
class TestNoPressureChangeOutlet:
@declare_process_block_class("DerivedSITO4")
class DerivedSITOData4(SITOBaseData):
def build(self):
self._has_deltaP_outlet = False
self._has_deltaP_waste = True
super().build()
@pytest.fixture(scope="module")
def model(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
m.fs.unit = DerivedSITO4(
default={"property_package": m.fs.water_props})
m.fs.unit.inlet.flow_vol.fix(42)
m.fs.unit.inlet.conc_mass_comp[0, "A"].fix(10)
m.fs.unit.inlet.conc_mass_comp[0, "B"].fix(20)
m.fs.unit.inlet.conc_mass_comp[0, "C"].fix(30)
m.fs.unit.inlet.temperature.fix(303.15)
m.fs.unit.inlet.pressure.fix(1.5e5)
m.fs.unit.recovery_vol.fix(0.8)
m.fs.unit.removal_mass_solute[0, "A"].fix(0.1)
m.fs.unit.removal_mass_solute[0, "B"].fix(0.2)
m.fs.unit.removal_mass_solute[0, "C"].fix(0.3)
m.fs.unit.deltaP_waste.fix(2000)
return m
@pytest.mark.unit
def test_build(self, model):
assert isinstance(model.fs.unit.properties_in, WaterStateBlock)
assert isinstance(model.fs.unit.properties_out, WaterStateBlock)
assert isinstance(model.fs.unit.properties_waste, WaterStateBlock)
assert isinstance(model.fs.unit.inlet, Port)
assert isinstance(model.fs.unit.outlet, Port)
assert isinstance(model.fs.unit.waste, Port)
assert isinstance(model.fs.unit.recovery_vol, Var)
assert len(model.fs.unit.recovery_vol) == 1
assert isinstance(model.fs.unit.removal_mass_solute, Var)
assert len(model.fs.unit.removal_mass_solute) == 3
assert not hasattr(model.fs.unit, "deltaP_outlet")
assert isinstance(model.fs.unit.deltaP_waste, Var)
assert len(model.fs.unit.deltaP_waste) == 1
assert isinstance(model.fs.unit.water_recovery_equation, Constraint)
assert len(model.fs.unit.water_recovery_equation) == 1
assert isinstance(model.fs.unit.flow_balance, Constraint)
assert len(model.fs.unit.flow_balance) == 1
assert isinstance(model.fs.unit.solute_removal_equation, Constraint)
assert len(model.fs.unit.solute_removal_equation) == 3
assert isinstance(model.fs.unit.solute_outlet_equation, Constraint)
assert len(model.fs.unit.solute_outlet_equation) == 3
assert isinstance(model.fs.unit.outlet_pressure_constraint, Constraint)
assert len(model.fs.unit.outlet_pressure_constraint) == 1
assert isinstance(model.fs.unit.waste_pressure_constraint, Constraint)
assert len(model.fs.unit.waste_pressure_constraint) == 1
assert isinstance(model.fs.unit.outlet_temperature_equality,
Constraint)
assert len(model.fs.unit.outlet_temperature_equality) == 1
assert isinstance(model.fs.unit.waste_temperature_equality, Constraint)
assert len(model.fs.unit.waste_temperature_equality) == 1
@pytest.mark.unit
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model)
@pytest.mark.component
def test_scaling(self, model):
iscale.calculate_scaling_factors(model)
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.water_recovery_equation[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.flow_balance[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.waste_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_temperature_equality[0]) == 1e-2
        assert iscale.get_constraint_transform_applied_scaling_factor(
            model.fs.unit.waste_temperature_equality[0]) == 1e-2
@pytest.mark.component
def test_initialization(self, model):
initialization_tester(model)
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(33.6, rel=1e-5) ==
value(model.fs.unit.outlet.flow_vol[0]))
assert (pytest.approx(8.4, rel=1e-5) ==
value(model.fs.unit.waste.flow_vol[0]))
assert (pytest.approx(11.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "A"]))
assert (pytest.approx(5, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "A"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "B"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "B"]))
assert (pytest.approx(26.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "C"]))
assert (pytest.approx(45, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "C"]))
assert (pytest.approx(150000, rel=1e-5) ==
value(model.fs.unit.outlet.pressure[0]))
assert (pytest.approx(152000, rel=1e-5) ==
value(model.fs.unit.waste.pressure[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.outlet.temperature[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.waste.temperature[0]))
@pytest.mark.component
def test_conservation(self, model):
assert abs(value(model.fs.unit.inlet.flow_vol[0] -
model.fs.unit.outlet.flow_vol[0] -
model.fs.unit.waste.flow_vol[0])) <= 1e-6
for j in model.fs.water_props.solute_set:
assert (abs(value(model.fs.unit.inlet.flow_vol[0] *
model.fs.unit.inlet.conc_mass_comp[0, j] -
model.fs.unit.outlet.flow_vol[0] *
model.fs.unit.outlet.conc_mass_comp[0, j] -
model.fs.unit.waste.flow_vol[0] *
model.fs.unit.waste.conc_mass_comp[0, j]))
<= 1e-6)
class TestNoPressureChangeWaste:
@declare_process_block_class("DerivedSITO5")
class DerivedSITOData5(SITOBaseData):
def build(self):
self._has_deltaP_outlet = True
self._has_deltaP_waste = False
super().build()
@pytest.fixture(scope="module")
def model(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.water_props = WaterParameterBlock(
default={"solute_list": ["A", "B", "C"]})
m.fs.unit = DerivedSITO5(
default={"property_package": m.fs.water_props})
m.fs.unit.inlet.flow_vol.fix(42)
m.fs.unit.inlet.conc_mass_comp[0, "A"].fix(10)
m.fs.unit.inlet.conc_mass_comp[0, "B"].fix(20)
m.fs.unit.inlet.conc_mass_comp[0, "C"].fix(30)
m.fs.unit.inlet.temperature.fix(303.15)
m.fs.unit.inlet.pressure.fix(1.5e5)
m.fs.unit.recovery_vol.fix(0.8)
m.fs.unit.removal_mass_solute[0, "A"].fix(0.1)
m.fs.unit.removal_mass_solute[0, "B"].fix(0.2)
m.fs.unit.removal_mass_solute[0, "C"].fix(0.3)
m.fs.unit.deltaP_outlet.fix(1000)
return m
@pytest.mark.unit
def test_build(self, model):
assert isinstance(model.fs.unit.properties_in, WaterStateBlock)
assert isinstance(model.fs.unit.properties_out, WaterStateBlock)
assert isinstance(model.fs.unit.properties_waste, WaterStateBlock)
assert isinstance(model.fs.unit.inlet, Port)
assert isinstance(model.fs.unit.outlet, Port)
assert isinstance(model.fs.unit.waste, Port)
assert isinstance(model.fs.unit.recovery_vol, Var)
assert len(model.fs.unit.recovery_vol) == 1
assert isinstance(model.fs.unit.removal_mass_solute, Var)
assert len(model.fs.unit.removal_mass_solute) == 3
assert not hasattr(model.fs.unit, "deltaP_waste")
assert isinstance(model.fs.unit.deltaP_outlet, Var)
assert len(model.fs.unit.deltaP_outlet) == 1
assert isinstance(model.fs.unit.water_recovery_equation, Constraint)
assert len(model.fs.unit.water_recovery_equation) == 1
assert isinstance(model.fs.unit.flow_balance, Constraint)
assert len(model.fs.unit.flow_balance) == 1
assert isinstance(model.fs.unit.solute_removal_equation, Constraint)
assert len(model.fs.unit.solute_removal_equation) == 3
assert isinstance(model.fs.unit.solute_outlet_equation, Constraint)
assert len(model.fs.unit.solute_outlet_equation) == 3
assert isinstance(model.fs.unit.outlet_pressure_constraint, Constraint)
assert len(model.fs.unit.outlet_pressure_constraint) == 1
assert isinstance(model.fs.unit.waste_pressure_constraint, Constraint)
assert len(model.fs.unit.waste_pressure_constraint) == 1
assert isinstance(model.fs.unit.outlet_temperature_equality,
Constraint)
assert len(model.fs.unit.outlet_temperature_equality) == 1
assert isinstance(model.fs.unit.waste_temperature_equality, Constraint)
assert len(model.fs.unit.waste_temperature_equality) == 1
@pytest.mark.unit
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model)
@pytest.mark.component
def test_scaling(self, model):
iscale.calculate_scaling_factors(model)
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.water_recovery_equation[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.flow_balance[0]) == 1e3
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_removal_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "A"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "B"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.solute_outlet_equation[0, "C"]) == 1e5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.waste_pressure_constraint[0]) == 1e-5
assert iscale.get_constraint_transform_applied_scaling_factor(
model.fs.unit.outlet_temperature_equality[0]) == 1e-2
        assert iscale.get_constraint_transform_applied_scaling_factor(
            model.fs.unit.waste_temperature_equality[0]) == 1e-2
@pytest.mark.component
def test_initialization(self, model):
initialization_tester(model)
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(33.6, rel=1e-5) ==
value(model.fs.unit.outlet.flow_vol[0]))
assert (pytest.approx(8.4, rel=1e-5) ==
value(model.fs.unit.waste.flow_vol[0]))
assert (pytest.approx(11.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "A"]))
assert (pytest.approx(5, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "A"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "B"]))
assert (pytest.approx(20, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "B"]))
assert (pytest.approx(26.25, rel=1e-5) ==
value(model.fs.unit.outlet.conc_mass_comp[0, "C"]))
assert (pytest.approx(45, rel=1e-5) ==
value(model.fs.unit.waste.conc_mass_comp[0, "C"]))
assert (pytest.approx(151000, rel=1e-5) ==
value(model.fs.unit.outlet.pressure[0]))
assert (pytest.approx(150000, rel=1e-5) ==
value(model.fs.unit.waste.pressure[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.outlet.temperature[0]))
assert (pytest.approx(303.15, rel=1e-5) ==
value(model.fs.unit.waste.temperature[0]))
@pytest.mark.component
def test_conservation(self, model):
assert abs(value(model.fs.unit.inlet.flow_vol[0] -
model.fs.unit.outlet.flow_vol[0] -
model.fs.unit.waste.flow_vol[0])) <= 1e-6
for j in model.fs.water_props.solute_set:
assert (abs(value(model.fs.unit.inlet.flow_vol[0] *
model.fs.unit.inlet.conc_mass_comp[0, j] -
model.fs.unit.outlet.flow_vol[0] *
model.fs.unit.outlet.conc_mass_comp[0, j] -
model.fs.unit.waste.flow_vol[0] *
model.fs.unit.waste.conc_mass_comp[0, j]))
<= 1e-6)
| 43.780236 | 81 | 0.636829 |
5d8c9b96009027deccb29833d6ca63a0dd076d83 | 42,939 | py | Python | newspaper/extractors.py | gavishpoddar/newspaper | bcbae0026c7459e4353ec0878be041f239918730 | ["Apache-2.0", "MIT"] | null | null | null | newspaper/extractors.py | gavishpoddar/newspaper | bcbae0026c7459e4353ec0878be041f239918730 | ["Apache-2.0", "MIT"] | null | null | null | newspaper/extractors.py | gavishpoddar/newspaper | bcbae0026c7459e4353ec0878be041f239918730 | ["Apache-2.0", "MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Newspaper uses much of python-goose's extraction code. View their license:
https://github.com/codelucas/newspaper/blob/master/GOOSE-LICENSE.txt
Keep all html page extraction code within this file. Abstract any
lxml or soup parsing code in the parsers.py file!
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import copy
import logging
import re
from collections import defaultdict
from dateutil.parser import parse as date_parser
from tldextract import tldextract
from urllib.parse import urljoin, urlparse, urlunparse
from dateparser.search import search_first_date
from . import urls
from .utils import StringReplacement, StringSplitter
log = logging.getLogger(__name__)
MOTLEY_REPLACEMENT = StringReplacement("�", "")
ESCAPED_FRAGMENT_REPLACEMENT = StringReplacement(
"#!", "?_escaped_fragment_=")
TITLE_REPLACEMENTS = StringReplacement("»", "»")
PIPE_SPLITTER = StringSplitter("\\|")
DASH_SPLITTER = StringSplitter(" - ")
UNDERSCORE_SPLITTER = StringSplitter("_")
SLASH_SPLITTER = StringSplitter("/")
ARROWS_SPLITTER = StringSplitter(" » ")
COLON_SPLITTER = StringSplitter(":")
SPACE_SPLITTER = StringSplitter(' ')
NO_STRINGS = set()
A_REL_TAG_SELECTOR = "a[rel=tag]"
A_HREF_TAG_SELECTOR = ("a[href*='/tag/'], a[href*='/tags/'], "
"a[href*='/topic/'], a[href*='?keyword=']")
RE_LANG = r'^[A-Za-z]{2}$'
good_paths = ['story', 'article', 'feature', 'featured', 'slides',
'slideshow', 'gallery', 'news', 'video', 'media',
'v', 'radio', 'press']
bad_chunks = ['careers', 'contact', 'about', 'faq', 'terms', 'privacy',
'advert', 'preferences', 'feedback', 'info', 'browse', 'howto',
'account', 'subscribe', 'donate', 'shop', 'admin']
bad_domains = ['amazon', 'doubleclick', 'twitter']
class ContentExtractor(object):
def __init__(self, config):
self.config = config
self.parser = self.config.get_parser()
self.language = config.language
self.stopwords_class = config.stopwords_class
def update_language(self, meta_lang):
"""Required to be called before the extraction process in some
cases because the stopwords_class has to set incase the lang
is not latin based
"""
if meta_lang:
self.language = meta_lang
self.stopwords_class = \
self.config.get_stopwords_class(meta_lang)
def get_authors(self, doc):
"""Fetch the authors of the article, return as a list
        Only works for English articles
"""
        _digits = re.compile(r'\d')
def contains_digits(d):
return bool(_digits.search(d))
def uniqify_list(lst):
"""Remove duplicates from provided list but maintain original order.
Derived from http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = {}
result = []
for item in lst:
if item.lower() in seen:
continue
seen[item.lower()] = 1
result.append(item.title())
return result
def parse_byline(search_str):
"""
Takes a candidate line of html or text and
extracts out the name(s) in list form:
>>> parse_byline('<div>By: <strong>Lucas Ou-Yang</strong>,<strong>Alex Smith</strong></div>')
['Lucas Ou-Yang', 'Alex Smith']
"""
# Remove HTML boilerplate
search_str = re.sub('<[^<]+?>', '', search_str)
# Remove original By statement
            search_str = re.sub(r'[bB][yY][\:\s]|[fF]rom[\:\s]', '', search_str)
search_str = search_str.strip()
# Chunk the line by non alphanumeric tokens (few name exceptions)
# >>> re.split("[^\w\'\-\.]", "Tyler G. Jones, Lucas Ou, Dean O'Brian and Ronald")
# ['Tyler', 'G.', 'Jones', '', 'Lucas', 'Ou', '', 'Dean', "O'Brian", 'and', 'Ronald']
            name_tokens = re.split(r"[^\w\'\-\.]", search_str)
name_tokens = [s.strip() for s in name_tokens]
_authors = []
# List of first, last name tokens
curname = []
delimiters = ['and', ',', '']
for token in name_tokens:
if token in delimiters:
if len(curname) > 0:
_authors.append(' '.join(curname))
curname = []
elif not contains_digits(token):
curname.append(token)
# One last check at end
valid_name = (len(curname) >= 2)
if valid_name:
_authors.append(' '.join(curname))
return _authors
# Try 1: Search popular author tags for authors
ATTRS = ['name', 'rel', 'itemprop', 'class', 'id']
VALS = ['auth-nm lnk', 'mobile-auth-nm lnk', 'auth-nm no-lnk', 'mobile-auth-nm no-lnk', 'author', 'byline', 'dc.creator', 'publisher flt']
matches = []
authors = []
for attr in ATTRS:
for val in VALS:
# found = doc.xpath('//*[@%s="%s"]' % (attr, val))
found = self.parser.getElementsByTag(doc, attr=attr, value=val)
matches.extend(found)
for match in matches:
content = ''
if match.tag == 'meta':
mm = match.xpath('@content')
if len(mm) > 0:
content = mm[0]
else:
content = match.text_content() or ''
if len(content) > 0:
authors.extend(parse_byline(content))
return uniqify_list(authors)
# TODO Method 2: Search raw html for a by-line
# match = re.search('By[\: ].*\\n|From[\: ].*\\n', html)
# try:
# # Don't let zone be too long
# line = match.group(0)[:100]
# authors = parse_byline(line)
# except:
# return [] # Failed to find anything
# return authors
def get_publishing_date(self, url, doc):
"""3 strategies for publishing date extraction. The strategies
are descending in accuracy and the next strategy is only
attempted if a preferred one fails.
1. Pubdate from URL
2. Pubdate from metadata
3. Raw regex searches in the HTML + added heuristics
"""
def parse_date_str(date_str):
if date_str:
try:
return date_parser(date_str)
except (ValueError, OverflowError, AttributeError, TypeError):
# near all parse failures are due to URL dates without a day
# specifier, e.g. /2014/04/
return None
date_match = re.search(urls.STRICT_DATE_REGEX, url)
if date_match:
date_str = date_match.group(0)
datetime_obj = parse_date_str(date_str)
if datetime_obj:
return datetime_obj
PUBLISH_DATE_TAGS = [
{'attribute': 'property', 'value': 'rnews:datePublished',
'content': 'content'},
{'attribute': 'property', 'value': 'article:published_time',
'content': 'content'},
{'attribute': 'name', 'value': 'OriginalPublicationDate',
'content': 'content'},
{'attribute': 'itemprop', 'value': 'datePublished',
'content': 'content'},
{'attribute': 'property', 'value': 'og:published_time',
'content': 'content'},
{'attribute': 'property', 'value': 'og:regDate',
'content': 'content'},
{'attribute': 'name', 'value': 'article_date_original',
'content': 'content'},
{'attribute': 'name', 'value': 'publication_date',
'content': 'content'},
{'attribute': 'name', 'value': 'sailthru.date',
'content': 'content'},
{'attribute': 'name', 'value': 'PublishDate',
'content': 'content'},
{'attribute': 'pubdate', 'value': 'pubdate',
'content': 'datetime'},
{'attribute': 'name', 'value': 'publish-date',
'content': 'content'},
{'attribute': 'name', 'value': 'created-date',
'content': 'content'},
{'attribute': 'name', 'value': 'modified-date',
'content': 'content'},
{'attribute': 'name', 'value': 'publish_date',
'content': 'content'},
]
for known_meta_tag in PUBLISH_DATE_TAGS:
meta_tags = self.parser.getElementsByTag(
doc,
attr=known_meta_tag['attribute'],
value=known_meta_tag['value'])
if meta_tags:
date_str = self.parser.getAttribute(
meta_tags[0],
known_meta_tag['content'])
datetime_obj = parse_date_str(date_str)
if datetime_obj:
return datetime_obj
# SPECIAL CASES
# EconomicTimes India
title_element = self.parser.getElementsByTag(doc, tag='time')
if title_element and len(title_element) > 0:
title_text = self.parser.getText(title_element[0])
sfd = search_first_date(title_text)
if sfd:
sfd = sfd[-1]
return sfd
return None
def get_title(self, doc):
"""Fetch the article title and analyze it
Assumptions:
- title tag is the most reliable (inherited from Goose)
- h1, if properly detected, is the best (visible to users)
- og:title and h1 can help improve the title extraction
- python == is too strict, often we need to compare filtered
versions, i.e. lowercase and ignoring special chars
Explicit rules:
1. title == h1, no need to split
2. h1 similar to og:title, use h1
3. title contains h1, title contains og:title, len(h1) > len(og:title), use h1
4. title starts with og:title, use og:title
5. use title, after splitting
"""
title = ''
title_element = self.parser.getElementsByTag(doc, tag='title')
# no title found
if title_element is None or len(title_element) == 0:
return title
# title elem found
title_text = self.parser.getText(title_element[0])
used_delimeter = False
# title from h1
# - extract the longest text from all h1 elements
# - too short texts (fewer than 2 words) are discarded
# - clean double spaces
title_text_h1 = ''
title_element_h1_list = self.parser.getElementsByTag(doc,
tag='h1') or []
title_text_h1_list = [self.parser.getText(tag) for tag in
title_element_h1_list]
if title_text_h1_list:
# sort by len and set the longest
title_text_h1_list.sort(key=len, reverse=True)
title_text_h1 = title_text_h1_list[0]
# discard too short texts
if len(title_text_h1.split(' ')) <= 2:
title_text_h1 = ''
# clean double spaces
title_text_h1 = ' '.join([x for x in title_text_h1.split() if x])
# title from og:title
title_text_fb = (
self.get_meta_content(doc, 'meta[property="og:title"]') or
self.get_meta_content(doc, 'meta[name="og:title"]') or '')
# create filtered versions of title_text, title_text_h1, title_text_fb
# for finer comparison
filter_regex = re.compile(r'[^\u4e00-\u9fa5a-zA-Z0-9\ ]')
filter_title_text = filter_regex.sub('', title_text).lower()
filter_title_text_h1 = filter_regex.sub('', title_text_h1).lower()
filter_title_text_fb = filter_regex.sub('', title_text_fb).lower()
# check for better alternatives for title_text and possibly skip splitting
if title_text_h1 == title_text:
used_delimeter = True
elif filter_title_text_h1 and filter_title_text_h1 == filter_title_text_fb:
title_text = title_text_h1
used_delimeter = True
elif filter_title_text_h1 and filter_title_text_h1 in filter_title_text \
and filter_title_text_fb and filter_title_text_fb in filter_title_text \
and len(title_text_h1) > len(title_text_fb):
title_text = title_text_h1
used_delimeter = True
elif filter_title_text_fb and filter_title_text_fb != filter_title_text \
and filter_title_text.startswith(filter_title_text_fb):
title_text = title_text_fb
used_delimeter = True
# split title with |
if not used_delimeter and '|' in title_text:
title_text = self.split_title(title_text, PIPE_SPLITTER,
title_text_h1)
used_delimeter = True
# split title with -
if not used_delimeter and '-' in title_text:
title_text = self.split_title(title_text, DASH_SPLITTER,
title_text_h1)
used_delimeter = True
# split title with _
if not used_delimeter and '_' in title_text:
title_text = self.split_title(title_text, UNDERSCORE_SPLITTER,
title_text_h1)
used_delimeter = True
# split title with /
if not used_delimeter and '/' in title_text:
title_text = self.split_title(title_text, SLASH_SPLITTER,
title_text_h1)
used_delimeter = True
# split title with »
if not used_delimeter and ' » ' in title_text:
title_text = self.split_title(title_text, ARROWS_SPLITTER,
title_text_h1)
used_delimeter = True
title = MOTLEY_REPLACEMENT.replaceAll(title_text)
# in some cases the final title is quite similar to title_text_h1
# (either it differs for case, for special chars, or it's truncated)
# in these cases, we prefer the title_text_h1
filter_title = filter_regex.sub('', title).lower()
if filter_title_text_h1 == filter_title:
title = title_text_h1
return title
def split_title(self, title, splitter, hint=None):
"""Split the title to best part possible
"""
large_text_length = 0
large_text_index = 0
title_pieces = splitter.split(title)
if hint:
filter_regex = re.compile(r'[^a-zA-Z0-9\ ]')
hint = filter_regex.sub('', hint).lower()
# find the largest title piece
for i, title_piece in enumerate(title_pieces):
current = title_piece.strip()
if hint and hint in filter_regex.sub('', current).lower():
large_text_index = i
break
if len(current) > large_text_length:
large_text_length = len(current)
large_text_index = i
# replace content
title = title_pieces[large_text_index]
return TITLE_REPLACEMENTS.replaceAll(title).strip()
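    # Illustrative usage (comment added; not in the original source --
    # `extractor` stands in for a ContentExtractor instance):
    # >>> extractor.split_title('A Long Headline About Things - Site', DASH_SPLITTER)
    # 'A Long Headline About Things'
    # With a hint, the piece containing the hint wins even when it is shorter:
    # >>> extractor.split_title('Example News | Story', PIPE_SPLITTER, hint='Story')
    # 'Story'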
def get_feed_urls(self, source_url, categories):
"""Takes a source url and a list of category objects and returns
a list of feed urls
"""
total_feed_urls = []
for category in categories:
            kwargs = {'attr': 'type', 'value': r'application\/rss\+xml'}
feed_elements = self.parser.getElementsByTag(
category.doc, **kwargs)
feed_urls = [e.get('href') for e in feed_elements if e.get('href')]
total_feed_urls.extend(feed_urls)
total_feed_urls = total_feed_urls[:50]
total_feed_urls = [urls.prepare_url(f, source_url)
for f in total_feed_urls]
total_feed_urls = list(set(total_feed_urls))
return total_feed_urls
def get_favicon(self, doc):
"""Extract the favicon from a website http://en.wikipedia.org/wiki/Favicon
<link rel="shortcut icon" type="image/png" href="favicon.png" />
<link rel="icon" type="image/png" href="favicon.png" />
"""
kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'icon'}
meta = self.parser.getElementsByTag(doc, **kwargs)
if meta:
favicon = self.parser.getAttribute(meta[0], 'href')
return favicon
return ''
def get_meta_lang(self, doc):
"""Extract content language from meta
"""
# we have a lang attribute in html
attr = self.parser.getAttribute(doc, attr='lang')
if attr is None:
# look up for a Content-Language in meta
items = [
{'tag': 'meta', 'attr': 'http-equiv',
'value': 'content-language'},
{'tag': 'meta', 'attr': 'name', 'value': 'lang'}
]
for item in items:
meta = self.parser.getElementsByTag(doc, **item)
if meta:
attr = self.parser.getAttribute(
meta[0], attr='content')
break
if attr:
value = attr[:2]
if re.search(RE_LANG, value):
return value.lower()
return None
def get_meta_content(self, doc, metaname):
"""Extract a given meta content form document.
Example metaNames:
"meta[name=description]"
"meta[name=keywords]"
"meta[property=og:type]"
"""
meta = self.parser.css_select(doc, metaname)
content = None
if meta is not None and len(meta) > 0:
content = self.parser.getAttribute(meta[0], 'content')
if content:
return content.strip()
return ''
def get_meta_img_url(self, article_url, doc):
"""Returns the 'top img' as specified by the website
"""
top_meta_image, try_one, try_two, try_three, try_four = [None] * 5
try_one = self.get_meta_content(doc, 'meta[property="og:image"]')
if not try_one:
link_img_src_kwargs = \
{'tag': 'link', 'attr': 'rel', 'value': 'img_src|image_src'}
elems = self.parser.getElementsByTag(doc, use_regex=True, **link_img_src_kwargs)
try_two = elems[0].get('href') if elems else None
if not try_two:
try_three = self.get_meta_content(doc, 'meta[name="og:image"]')
if not try_three:
link_icon_kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'icon'}
elems = self.parser.getElementsByTag(doc, **link_icon_kwargs)
try_four = elems[0].get('href') if elems else None
top_meta_image = try_one or try_two or try_three or try_four
if top_meta_image:
return urljoin(article_url, top_meta_image)
return ''
def get_meta_type(self, doc):
"""Returns meta type of article, open graph protocol
"""
return self.get_meta_content(doc, 'meta[property="og:type"]')
def get_meta_site_name(self, doc):
"""Returns site name of article, open graph protocol
"""
return self.get_meta_content(doc, 'meta[property="og:site_name"]')
def get_meta_description(self, doc):
"""If the article has meta description set in the source, use that
"""
return self.get_meta_content(doc, "meta[name=description]")
def get_meta_keywords(self, doc):
"""If the article has meta keywords set in the source, use that
"""
return self.get_meta_content(doc, "meta[name=keywords]")
def get_meta_data(self, doc):
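        """Collect all meta tags into a nested dict.
        (Docstring added for clarity; not part of the original source.)
        Colon-separated property names are expanded into nested dicts, so a
        tag such as <meta property="og:image:width" content="300"> ends up as
        data['og']['image']['width'] == 300 (purely numeric values are cast
        to int).
        """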
data = defaultdict(dict)
properties = self.parser.css_select(doc, 'meta')
for prop in properties:
key = prop.attrib.get('property') or prop.attrib.get('name')
value = prop.attrib.get('content') or prop.attrib.get('value')
if not key or not value:
continue
key, value = key.strip(), value.strip()
if value.isdigit():
value = int(value)
if ':' not in key:
data[key] = value
continue
key = key.split(':')
key_head = key.pop(0)
ref = data[key_head]
if isinstance(ref, str) or isinstance(ref, int):
data[key_head] = {key_head: ref}
ref = data[key_head]
for idx, part in enumerate(key):
if idx == len(key) - 1:
ref[part] = value
break
if not ref.get(part):
ref[part] = dict()
elif isinstance(ref.get(part), str) or isinstance(ref.get(part), int):
# Not clear what to do in this scenario,
# it's not always a URL, but an ID of some sort
ref[part] = {'identifier': ref[part]}
ref = ref[part]
return data
def get_canonical_link(self, article_url, doc):
"""
Return the article's canonical URL
Gets the first available value of:
1. The rel=canonical tag
2. The og:url tag
"""
links = self.parser.getElementsByTag(doc, tag='link', attr='rel',
value='canonical')
canonical = self.parser.getAttribute(links[0], 'href') if links else ''
og_url = self.get_meta_content(doc, 'meta[property="og:url"]')
meta_url = canonical or og_url or ''
if meta_url:
meta_url = meta_url.strip()
parsed_meta_url = urlparse(meta_url)
if not parsed_meta_url.hostname:
# MIGHT not have a hostname in meta_url
# parsed_url.path might be 'example.com/article.html' where
# clearly example.com is the hostname
parsed_article_url = urlparse(article_url)
strip_hostname_in_meta_path = re. \
match(".*{}(?=/)/(.*)".
format(parsed_article_url.hostname),
parsed_meta_url.path)
try:
true_path = strip_hostname_in_meta_path.group(1)
except AttributeError:
true_path = parsed_meta_url.path
# true_path may contain querystrings and fragments
meta_url = urlunparse((parsed_article_url.scheme,
parsed_article_url.hostname, true_path,
'', '', ''))
return meta_url
def get_img_urls(self, article_url, doc):
"""Return all of the images on an html page, lxml root
"""
img_kwargs = {'tag': 'img'}
img_tags = self.parser.getElementsByTag(doc, **img_kwargs)
urls = [img_tag.get('src')
for img_tag in img_tags if img_tag.get('src')]
img_links = set([urljoin(article_url, url)
for url in urls])
return img_links
def get_first_img_url(self, article_url, top_node):
"""Retrieves the first image in the 'top_node'
        The top node is essentially the HTML markup where the main
        article lies and the first image in that area is probably significant.
"""
node_images = self.get_img_urls(article_url, top_node)
node_images = list(node_images)
if node_images:
return urljoin(article_url, node_images[0])
return ''
def _get_urls(self, doc, titles):
"""Return a list of urls or a list of (url, title_text) tuples
if specified.
"""
if doc is None:
return []
a_kwargs = {'tag': 'a'}
a_tags = self.parser.getElementsByTag(doc, **a_kwargs)
# TODO: this should be refactored! We should have a separate
        # method which siphons the titles out of a list of <a> tags.
if titles:
return [(a.get('href'), a.text) for a in a_tags if a.get('href')]
return [a.get('href') for a in a_tags if a.get('href')]
def get_urls(self, doc_or_html, titles=False, regex=False):
"""`doc_or_html`s html page or doc and returns list of urls, the regex
flag indicates we don't parse via lxml and just search the html.
"""
if doc_or_html is None:
log.critical('Must extract urls from either html, text or doc!')
return []
# If we are extracting from raw text
if regex:
doc_or_html = re.sub('<[^<]+?>', ' ', str(doc_or_html))
doc_or_html = re.findall(
                r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
'(?:%[0-9a-fA-F][0-9a-fA-F]))+', doc_or_html)
doc_or_html = [i.strip() for i in doc_or_html]
return doc_or_html or []
# If the doc_or_html is html, parse it into a root
if isinstance(doc_or_html, str):
doc = self.parser.fromstring(doc_or_html)
else:
doc = doc_or_html
return self._get_urls(doc, titles)
def get_category_urls(self, source_url, doc):
"""Inputs source lxml root and source url, extracts domain and
finds all of the top level urls, we are assuming that these are
the category urls.
cnn.com --> [cnn.com/latest, world.cnn.com, cnn.com/asia]
"""
page_urls = self.get_urls(doc)
valid_categories = []
for p_url in page_urls:
scheme = urls.get_scheme(p_url, allow_fragments=False)
domain = urls.get_domain(p_url, allow_fragments=False)
path = urls.get_path(p_url, allow_fragments=False)
if not domain and not path:
if self.config.verbose:
print('elim category url %s for no domain and path'
% p_url)
continue
if path and path.startswith('#'):
if self.config.verbose:
print('elim category url %s path starts with #' % p_url)
continue
if scheme and (scheme != 'http' and scheme != 'https'):
if self.config.verbose:
print(('elim category url %s for bad scheme, '
'not http nor https' % p_url))
continue
if domain:
child_tld = tldextract.extract(p_url)
domain_tld = tldextract.extract(source_url)
child_subdomain_parts = child_tld.subdomain.split('.')
subdomain_contains = False
for part in child_subdomain_parts:
if part == domain_tld.domain:
if self.config.verbose:
print(('subdomain contains at %s and %s' %
(str(part), str(domain_tld.domain))))
subdomain_contains = True
break
# Ex. microsoft.com is definitely not related to
# espn.com, but espn.go.com is probably related to espn.com
if not subdomain_contains and \
(child_tld.domain != domain_tld.domain):
if self.config.verbose:
print(('elim category url %s for domain '
'mismatch' % p_url))
continue
elif child_tld.subdomain in ['m', 'i']:
if self.config.verbose:
print(('elim category url %s for mobile '
'subdomain' % p_url))
continue
else:
valid_categories.append(scheme + '://' + domain)
# TODO account for case where category is in form
# http://subdomain.domain.tld/category/ <-- still legal!
else:
# we want a path with just one subdir
# cnn.com/world and cnn.com/world/ are both valid_categories
path_chunks = [x for x in path.split('/') if len(x) > 0]
if 'index.html' in path_chunks:
path_chunks.remove('index.html')
if len(path_chunks) == 1 and len(path_chunks[0]) < 14:
valid_categories.append(domain + path)
else:
if self.config.verbose:
print(('elim category url %s for >1 path chunks '
'or size path chunks' % p_url))
stopwords = [
'about', 'help', 'privacy', 'legal', 'feedback', 'sitemap',
'profile', 'account', 'mobile', 'sitemap', 'facebook', 'myspace',
'twitter', 'linkedin', 'bebo', 'friendster', 'stumbleupon',
'youtube', 'vimeo', 'store', 'mail', 'preferences', 'maps',
'password', 'imgur', 'flickr', 'search', 'subscription', 'itunes',
'siteindex', 'events', 'stop', 'jobs', 'careers', 'newsletter',
'subscribe', 'academy', 'shopping', 'purchase', 'site-map',
'shop', 'donate', 'newsletter', 'product', 'advert', 'info',
'tickets', 'coupons', 'forum', 'board', 'archive', 'browse',
'howto', 'how to', 'faq', 'terms', 'charts', 'services',
'contact', 'plus', 'admin', 'login', 'signup', 'register',
'developer', 'proxy']
_valid_categories = []
# TODO Stop spamming urlparse and tldextract calls...
for p_url in valid_categories:
path = urls.get_path(p_url)
subdomain = tldextract.extract(p_url).subdomain
conjunction = path + ' ' + subdomain
bad = False
for badword in stopwords:
if badword.lower() in conjunction.lower():
if self.config.verbose:
print(('elim category url %s for subdomain '
'contain stopword!' % p_url))
bad = True
break
if not bad:
_valid_categories.append(p_url)
_valid_categories.append('/') # add the root
for i, p_url in enumerate(_valid_categories):
if p_url.startswith('://'):
p_url = 'http' + p_url
_valid_categories[i] = p_url
elif p_url.startswith('//'):
p_url = 'http:' + p_url
_valid_categories[i] = p_url
if p_url.endswith('/'):
p_url = p_url[:-1]
_valid_categories[i] = p_url
_valid_categories = list(set(_valid_categories))
category_urls = [urls.prepare_url(p_url, source_url)
for p_url in _valid_categories]
category_urls = [c for c in category_urls if c is not None]
return category_urls
def extract_tags(self, doc):
if len(list(doc)) == 0:
return NO_STRINGS
elements = self.parser.css_select(
doc, A_REL_TAG_SELECTOR)
if not elements:
elements = self.parser.css_select(
doc, A_HREF_TAG_SELECTOR)
if not elements:
return NO_STRINGS
tags = []
for el in elements:
tag = self.parser.getText(el)
if tag:
tags.append(tag)
return set(tags)
def calculate_best_node(self, doc):
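        """(Docstring added for clarity; not part of the original source.)
        Pick the node most likely to hold the article body: score candidate
        p/pre/td nodes by stopword count, give boostable paragraphs a
        decaying bonus, penalise the trailing ~25% of nodes on long pages,
        propagate each score to the parent (and half of it to the
        grandparent), and return the highest-scoring ancestor.
        """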
top_node = None
nodes_to_check = self.nodes_to_check(doc)
starting_boost = float(1.0)
cnt = 0
i = 0
parent_nodes = []
nodes_with_text = []
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.language). \
get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
nodes_with_text.append(node)
nodes_number = len(nodes_with_text)
negative_scoring = 0
bottom_negativescore_nodes = float(nodes_number) * 0.25
for node in nodes_with_text:
boost_score = float(0)
# boost
if self.is_boostable(node):
if cnt >= 0:
boost_score = float((1.0 / starting_boost) * 50)
starting_boost += 1
# nodes_number
if nodes_number > 15:
if (nodes_number - i) <= bottom_negativescore_nodes:
booster = float(
bottom_negativescore_nodes - (nodes_number - i))
boost_score = float(-pow(booster, float(2)))
negscore = abs(boost_score) + negative_scoring
if negscore > 40:
boost_score = float(5)
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.language). \
get_stopword_count(text_node)
upscore = int(word_stats.get_stopword_count() + boost_score)
parent_node = self.parser.getParent(node)
self.update_score(parent_node, upscore)
self.update_node_count(parent_node, 1)
if parent_node not in parent_nodes:
parent_nodes.append(parent_node)
# Parent of parent node
parent_parent_node = self.parser.getParent(parent_node)
if parent_parent_node is not None:
self.update_node_count(parent_parent_node, 1)
self.update_score(parent_parent_node, upscore / 2)
if parent_parent_node not in parent_nodes:
parent_nodes.append(parent_parent_node)
cnt += 1
i += 1
top_node_score = 0
for e in parent_nodes:
score = self.get_score(e)
if score > top_node_score:
top_node = e
top_node_score = score
if top_node is None:
top_node = e
return top_node
def is_boostable(self, node):
"""A lot of times the first paragraph might be the caption under an image
so we'll want to make sure if we're going to boost a parent node that
it should be connected to other paragraphs, at least for the first n
paragraphs so we'll want to make sure that the next sibling is a
paragraph and has at least some substantial weight to it.
"""
para = "p"
steps_away = 0
minimum_stopword_count = 5
max_stepsaway_from_node = 3
nodes = self.walk_siblings(node)
for current_node in nodes:
# <p>
current_node_tag = self.parser.getTag(current_node)
if current_node_tag == para:
if steps_away >= max_stepsaway_from_node:
return False
paragraph_text = self.parser.getText(current_node)
word_stats = self.stopwords_class(language=self.language). \
get_stopword_count(paragraph_text)
if word_stats.get_stopword_count() > minimum_stopword_count:
return True
steps_away += 1
return False
def walk_siblings(self, node):
return self.parser.previousSiblings(node)
def add_siblings(self, top_node):
baseline_score_siblings_para = self.get_siblings_score(top_node)
results = self.walk_siblings(top_node)
for current_node in results:
ps = self.get_siblings_content(
current_node, baseline_score_siblings_para)
for p in ps:
top_node.insert(0, p)
return top_node
def get_siblings_content(
self, current_sibling, baseline_score_siblings_para):
"""Adds any siblings that may have a decent score to this node
"""
if current_sibling.tag == 'p' and \
len(self.parser.getText(current_sibling)) > 0:
e0 = current_sibling
if e0.tail:
e0 = copy.deepcopy(e0)
e0.tail = ''
return [e0]
else:
potential_paragraphs = self.parser.getElementsByTag(
current_sibling, tag='p')
if potential_paragraphs is None:
return None
else:
ps = []
for first_paragraph in potential_paragraphs:
text = self.parser.getText(first_paragraph)
if len(text) > 0:
word_stats = self.stopwords_class(
language=self.language). \
get_stopword_count(text)
paragraph_score = word_stats.get_stopword_count()
sibling_baseline_score = float(.30)
high_link_density = self.is_highlink_density(
first_paragraph)
score = float(baseline_score_siblings_para *
sibling_baseline_score)
if score < paragraph_score and not high_link_density:
p = self.parser.createElement(
tag='p', text=text, tail=None)
ps.append(p)
return ps
def get_siblings_score(self, top_node):
"""We could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base.
"""
base = 100000
paragraphs_number = 0
paragraphs_score = 0
nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.language). \
get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
paragraphs_number += 1
paragraphs_score += word_stats.get_stopword_count()
if paragraphs_number > 0:
base = paragraphs_score / paragraphs_number
return base
def update_score(self, node, add_to_score):
"""Adds a score to the gravityScore Attribute we put on divs
we'll get the current score then add the score we're passing
in to the current.
"""
current_score = 0
score_string = self.parser.getAttribute(node, 'gravityScore')
if score_string:
current_score = float(score_string)
new_score = current_score + add_to_score
self.parser.setAttribute(node, "gravityScore", str(new_score))
def update_node_count(self, node, add_to_count):
"""Stores how many decent nodes are under a parent node
"""
current_score = 0
count_string = self.parser.getAttribute(node, 'gravityNodes')
if count_string:
current_score = int(count_string)
new_score = current_score + add_to_count
self.parser.setAttribute(node, "gravityNodes", str(new_score))
def is_highlink_density(self, e):
"""Checks the density of links within a node, if there is a high
link to text ratio, then the text is less likely to be relevant
"""
links = self.parser.getElementsByTag(e, tag='a')
if not links:
return False
text = self.parser.getText(e)
words = [word for word in text.split() if word.isalnum()]
if not words:
return True
words_number = float(len(words))
sb = []
for link in links:
sb.append(self.parser.getText(link))
link_text = ''.join(sb)
link_words = link_text.split()
num_link_words = float(len(link_words))
num_links = float(len(links))
link_divisor = float(num_link_words / words_number)
score = float(link_divisor * num_links)
if score >= 1.0:
return True
return False
# return True if score > 1.0 else False
def get_score(self, node):
"""Returns the gravityScore as an integer from this node
"""
return self.get_node_gravity_score(node) or 0
def get_node_gravity_score(self, node):
gravity_score = self.parser.getAttribute(node, 'gravityScore')
if not gravity_score:
return None
return float(gravity_score)
def nodes_to_check(self, doc):
"""Returns a list of nodes we want to search
on like paragraphs and tables
"""
nodes_to_check = []
for tag in ['p', 'pre', 'td']:
items = self.parser.getElementsByTag(doc, tag=tag)
nodes_to_check += items
return nodes_to_check
def is_table_and_no_para_exist(self, e):
sub_paragraphs = self.parser.getElementsByTag(e, tag='p')
for p in sub_paragraphs:
txt = self.parser.getText(p)
if len(txt) < 25:
self.parser.remove(p)
sub_paragraphs_2 = self.parser.getElementsByTag(e, tag='p')
if len(sub_paragraphs_2) == 0 and e.tag != "td":
return True
return False
def is_nodescore_threshold_met(self, node, e):
top_node_score = self.get_score(node)
current_node_score = self.get_score(e)
threshold = float(top_node_score * .08)
if (current_node_score < threshold) and e.tag != 'td':
return False
return True
def post_cleanup(self, top_node):
"""Remove any divs that looks like non-content, clusters of links,
or paras with no gusto; add adjacent nodes which look contenty
"""
node = self.add_siblings(top_node)
for e in self.parser.getChildren(node):
e_tag = self.parser.getTag(e)
if e_tag != 'p':
if self.is_highlink_density(e):
self.parser.remove(e)
return node
| 39.869081 | 146 | 0.55679 |
6326a5b45fe6e572b10044e452b0c359367b0a10 | 66,994 | py | Python | tensorflow/python/eager/function_test.py | Dectinc/tensorflow | 36922f9b27a0c51987966945890766aa4954a1ab | ["Apache-2.0"] | null | null | null | tensorflow/python/eager/function_test.py | Dectinc/tensorflow | 36922f9b27a0c51987966945890766aa4954a1ab | ["Apache-2.0"] | null | null | null | tensorflow/python/eager/function_test.py | Dectinc/tensorflow | 36922f9b27a0c51987966945890766aa4954a1ab | ["Apache-2.0"] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from multiprocessing.pool import ThreadPool
import sys
import weakref
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class MiniModel(keras_training.Model):
"""Minimal model for mnist.
Useful for testing and debugging on slow TPU simulators.
"""
def __init__(self):
super(MiniModel, self).__init__(name='')
self.fc = keras.layers.Dense(1, name='fc', kernel_initializer='ones',
bias_initializer='ones')
def call(self, inputs, training=True):
return self.fc(inputs)
class DefunnedMiniModel(MiniModel):
@function.defun
def call(self, inputs, training=True):
return super(DefunnedMiniModel, self).call(inputs, training=training)
@test_util.with_c_shapes
class FunctionTest(test.TestCase):
def testBasic(self):
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
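    # Explanatory note (added): with transpose_a=True the first product is
    # t^T @ t = [[10, 14], [14, 20]] and the second is sq^T @ t =
    # [[52, 76], [74, 108]], which is what the flattened assertions check.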
def testBasicGraphMode(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = function.defun(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@function.defun
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGraphModeWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@function.defun
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
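    # Explanatory note (added): implicit_grad returns (gradient, variable)
    # pairs, and d(v * v)/dv = 2 * v = 2.0 at v = 1.0, hence the expected
    # value below.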
self.assertAllEqual(step(), 2.0)
def testGraphGradientVariable(self):
with ops.Graph().as_default(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return 2.0 * v
node = f()
grads, = gradients_impl.gradients(node, v)
v.initializer.run()
self.assertAllEqual(grads.eval(), 2.0)
self.assertEqual(grads.shape, v.shape)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = resource_variable_ops.ResourceVariable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
def testBasicGraphFunction(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testExecutingStatelessDefunConcurrently(self):
@function.defun
def stateless(x):
return math_ops.multiply(2.0, x)
pool = ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@function.defun
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
outputs = [
float(out)
for out in pool.map(stateless, [object() for _ in range(100)])
]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def stateful(x):
v.assign(x)
pool = ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def stateful(x):
del x
return v.assign(0.0)
pool = ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def disabled_testRandomSeed(self):
@function.defun
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testSymGradGatherNd(self):
with ops.Graph().as_default(), self.cached_session() as sess:
@function.defun
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertAllEqual(sess.run(g), [[1.0]])
def testNestedInputsGraphFunction(self):
matmul = function.defun(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@function.defun
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
inputs = pair({'a': t}, {'b': t})
sq_op = a_times_b.get_concrete_function(inputs)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(inputs)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = function.defun(math_ops.matmul)
@function.defun
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testGraphFunctionWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@function.defun
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
step_op = step.get_concrete_function()
self.assertEqual(step_op.output_dtypes, dtypes.float32)
self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([]))
self.assertAllEqual(step_op(), 2.0)
def testGraphFunctionNoneOutput(self):
@function.defun
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
@test_util.run_in_graph_and_eager_modes()
def testDefunCondGradient(self):
@function.defun
def f(x):
return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testGraphLoopGradient(self):
@function.defun
def f(x):
return control_flow_ops.while_loop(lambda _, i: i < 2,
lambda x, i: (2*x, i + 1),
[x, 0])[0]
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0)
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertEqual(len(defined._function_cache), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertEqual(len(defined._function_cache), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(numpy.ones([])).numpy())
self.assertEqual(0., defined(numpy.zeros([])).numpy())
self.assertEqual(1., defined(array_ops.ones([])).numpy())
self.assertEqual(0., defined(array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@function.defun
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@function.defun
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
error_msg = ('Tensor-typed variable initializers must either be '
'wrapped in an init_scope or callable.*')
@function.defun
def tensor_init():
with self.assertRaisesRegexp(ValueError, error_msg):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@function.defun
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.run_in_graph_and_eager_modes
def testInitScopeTensorInitializationInFunction(self):
@function.defun
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
compiled = function.defun(f)
compiled()
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
compiled = function.defun(f)
compiled()
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = function.defun(f)
compiled()
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testDefunDifferentiable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testDefunCanBeDifferentiatedTwice(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
# Ensure that v is watched again.
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session() as sess:
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
variables.global_variables_initializer().run()
call = function.defun(o.call)
op = call()
self.assertAllEqual(sess.run(op), 2.0)
def testSymbolicGradientVariableZerosLike(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f(x, v):
v.read_value()
return x * x
x = constant_op.constant(1.0)
l = f(x, v)
_, dv = gradients_impl.gradients(l, [x, v])
with self.cached_session():
v.initializer.run()
self.assertAllEqual(dv.eval(), 0.0)
def testGraphModeManyFunctions(self):
with context.graph_mode(), self.cached_session():
@function.defun
def f(x):
return x * x
@function.defun
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0)
def testDict(self):
@function.defun
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testTensorConversionWithDefun(self):
@function.defun
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@function.defun
def f(x):
return math_ops.add(x, constant_op.constant(3))
@function.defun
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testDefunCallBackprop(self):
@function.defun
def f(x):
return math_ops.add(x, x)
@function.defun
def g(x):
return backprop.gradients_function(f, [0])(x)[0]
self.assertAllEqual(2, g(constant_op.constant(2.)))
def testGraphModeEagerGradError(self):
with context.graph_mode():
def f():
x = variable_scope.get_variable(
'v', initializer=constant_op.constant(1.0))
return x * constant_op.constant(2.0)
with self.assertRaisesRegexp(ValueError,
'No trainable variables were accessed'):
backprop.implicit_val_and_grad(f)()
def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):
@function.defun
def g(x):
return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x)
def np_g(x):
return [d.numpy() for d in g(x)]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x))
self.assertAllEqual([1., 1.], np_g(1.))
def testCallShape(self):
@function.defun
def f(x):
return x + 1
@function.defun
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@function.defun
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@function.defun
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGradientTensorConversionWithDefun(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@function.defun
def f(x):
return math_ops.add(x, three)
def g(x):
return f(x)
g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]
self.assertAllEqual(g, 1.0)
def testGradient(self):
matmul = function.defun(math_ops.matmul)
def sq(x):
return matmul(x, x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
def testGradientInFunction(self):
@function.defun
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = function.defun(sum_gather)
self.assertAllEqual(sum_gather(), defined())
def testGradientOfGatherWithDefun(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
grad_fn = backprop.implicit_grad(sum_gather)
gradient = grad_fn()
defun_grad_fn = backprop.implicit_grad(function.defun(sum_gather))
defun_gradient = defun_grad_fn()
self.assertEqual(len(gradient), len(defun_gradient))
gradient = gradient[0][0]
defun_gradient = defun_gradient[0][0]
self.assertAllEqual(gradient.values, defun_gradient.values)
self.assertAllEqual(gradient.indices, defun_gradient.indices)
self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
def testReturningIndexedSlicesWithDefun(self):
def validate(indexed_slice):
@function.defun
def f():
return indexed_slice
output = f()
self.assertTrue(isinstance(output, ops.IndexedSlices))
self.assertAllEqual(indexed_slice.values, output.values)
self.assertAllEqual(indexed_slice.indices, output.indices)
self.assertAllEqual(indexed_slice.dense_shape, output.dense_shape)
self.assertEqual(
f.get_concrete_function().output_shapes,
indexed_slice.values.shape)
arg = ops.IndexedSlices(
values=constant_op.constant([1, 2]),
indices=constant_op.constant([0, 1]),
dense_shape=constant_op.constant([2]))
validate(arg)
arg = ops.IndexedSlices(
values=constant_op.constant([1, 2]),
indices=constant_op.constant([0, 1]),
dense_shape=None)
validate(arg)
def testIndexedSliceAsArgumentWithDefun(self):
@function.defun
def f(indexed_slice):
return indexed_slice
def validate(arg):
output = f(arg)
self.assertTrue(isinstance(output, ops.IndexedSlices))
self.assertAllEqual(arg.values, output.values)
self.assertAllEqual(arg.indices, output.indices)
self.assertAllEqual(arg.dense_shape, output.dense_shape)
indexed_slice = ops.IndexedSlices(
values=constant_op.constant([1]),
indices=constant_op.constant([0]),
dense_shape=constant_op.constant([1]))
validate(indexed_slice)
# Test that `f` works even when `dense_shape` is None.
indexed_slice = ops.IndexedSlices(
values=constant_op.constant([1]),
indices=constant_op.constant([0]),
dense_shape=None)
validate(indexed_slice)
def testFunctionOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([1.]).gpu()
f = function.defun(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@function.defun
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon,
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegexp(
errors.InvalidArgumentError, 'Could not colocate node with its '
'resource and reference inputs.*'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
def testFunctionHandlesInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = function.defun(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = function.defun(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testDifferentiableFunctionNoneOutputs(self):
@function.defun
def my_function(x):
return x, None
def wrapper(x):
return my_function(x)[0]
g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))
self.assertAllEqual(g[0], 1.)
@function.defun
def foo(a):
return None, a * a
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
none, r = foo(x)
g = tp.gradient(r, x)
self.assertIs(none, None)
self.assertAllEqual(r, 25.0)
self.assertAllEqual(g, 2 * 5.0)
def testNestedDifferentiableFunction(self):
@function.defun
def inner_fn(a, b):
return a * math_ops.add(a, b)
@function.defun
def outer_fn(x):
return inner_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testNestedDifferentiableFunctionNoneOutputs(self):
@function.defun
def foo(a, b):
return None, a * math_ops.add(a, b), None, 2*a
@function.defun
def bar(x):
return foo(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape(persistent=True) as tp:
tp.watch(x)
none1, r1, none2, r2 = bar(x)
g1 = tp.gradient(r1, x)
g2 = tp.gradient(r2, x)
self.assertAllEqual(r1, 30.0)
self.assertAllEqual(r2, 10.0)
self.assertIs(none1, None)
self.assertIs(none2, None)
self.assertAllEqual(g1, 2 * 5.0 + 1.0)
self.assertAllEqual(g2, 2.0)
def testNoneOutput(self):
@function.defun
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@function.defun
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@function.defun
def inner_read():
return v.read_value()
@function.defun
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@function.defun
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@function.defun
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = function.defun(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertTrue(isinstance(t, ops.Tensor))
self.assertTrue(isinstance(global_norm, ops.Tensor))
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = function.defun(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertEqual(len(ret), 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertTrue(isinstance(ret[0][1][0], tuple))
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
def testVariableNamesRespectNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@function.defun
def create_variable():
with ops.name_scope('foo'):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[[[4.0]]]], self.evaluate(y))
# Remove reference cycles in model
test_util.dismantle_polymorphic_function(model)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDefunKerasModelCall(self):
model = MiniModel()
model.call = function.defun(model.call)
x = array_ops.ones([1, 2])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([[3.0]], self.evaluate(y))
# Remove reference cycles in defun.
test_util.dismantle_polymorphic_function(model.call)
# Break the reference cycle between the MiniModel and the defun:
# MiniModel --(through its `call` method)--> PolymorphicFunction
# PolymorphicFunction --(instancemethod on MiniModel)--> MiniModel
del model.call
# Note: The ConfigProto below unfortunately only configures graph
# construction. Eager's configuration is controlled in `__main__`.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device('/cpu:1'):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device('/cpu:2'):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertEqual(len(defined._function_cache), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
self.assertEqual(len(defined._function_cache), 2)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
# This should retrieve the call-site-device agnostic function
defined()
self.assertEqual(len(defined._function_cache), 2)
# And this should retrieve the function created for '/cpu:3'
with ops.device('/cpu:3'):
defined()
self.assertEqual(len(defined._function_cache), 2)
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={'CPU': 2}))
def testCallingGraphFunctionOnIncompatibleDeviceRaisesError(self):
def func():
return constant_op.constant(0)
defined = function.defun(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device('cpu:1'):
cpu_graph_function()
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device(None):
cpu_graph_function()
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with self.assertRaisesRegexp(
ValueError,
'The current device stack does not match the device stack under '
'which the TensorFlow function \'.*func.*\' was created.\n'
'Current device stack: .*\n.*func.* device stack.*'):
with ops.device('cpu:1'):
default_graph_function()
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
if not context.context().num_gpus():
self.skipTest('No GPUs found.')
with ops.device('cpu:0'):
x = constant_op.constant(1.0)
with ops.device('gpu:0'):
y = constant_op.constant(1.0)
@function.defun
def foo():
return iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = function.defun(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
def testPythonFunctionWithDefaultArgs(self):
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
def cache_keys():
"""Sanitizes cache keys of non-input metadata."""
return tuple(key[:3] for key in defined._function_cache)
# `True` corresponds to the fact that we're executing eagerly
self.assertIn((0, 1, 20), cache_keys())
defined(1) # bar=1, baz=2
self.assertIn((1, 1, 2), cache_keys())
# This matches the previous call.
defined(foo=1)
self.assertEqual(len(defined._function_cache), 2)
defined(1, 2, 3)
self.assertIn((1, 2, 3), cache_keys())
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertEqual(len(defined._function_cache), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertEqual(len(defined._function_cache), 3)
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=3)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureWithCompatibleInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
out = defined(a)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, a)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertEqual(len(defined._function_cache), 1)
self.assertAllEqual(out, b)
def testNestedInputSignatures(self):
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
signature = [[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
out = defined([a, a], b)
self.assertEqual(len(defined._function_cache), 1)
nest.assert_same_structure(out, [[a, a], b])
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
out = defined([a, b], c)
self.assertEqual(len(defined._function_cache), 1)
nest.assert_same_structure(out, [[a, b], c])
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
signature = [{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)
}]
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
defined = function.defun(bar, input_signature=signature)
out = defined(inputs)
nest.assert_same_structure(out, inputs)
self.assertAllEqual(out['a'], inputs['a'])
self.assertAllEqual(out['b'], inputs['b'])
self.assertAllEqual(out['c'], inputs['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'):
function.defun(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegexp(TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegexp(ValueError,
'Structure of Python function inputs.*'):
defined()
def testInputSignatureForFunctionWithNonTensorInputsNotAllowed(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [tensor_spec.TensorSpec([], dtypes.float32)] * 2
defined = function.defun(foo, input_signature=signature)
a = constant_op.constant(1.0)
with self.assertRaisesRegexp(
ValueError, 'When input_signature is provided, '
'all inputs to the Python function must be Tensors.'):
defined(a, training=True)
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertEqual(len(foo._function_cache), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgsFails(self):
def foo(a, **kwargs):
del a
del kwargs
with self.assertRaisesRegexp(
ValueError, 'Cannot define a TensorFlow function from a Python '
'function with keyword arguments when input_signature.*'):
function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertEqual(len(defined._function_cache), 1)
two = defined(a=a, b=b)
self.assertEqual(len(defined._function_cache), 1)
three = defined(b=b, a=a)
self.assertEqual(len(defined._function_cache), 1)
four = defined(a, b=b)
self.assertEqual(len(defined._function_cache), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertEqual(len(defined._function_cache), 2)
six = defined(a=b, b=a)
self.assertEqual(len(defined._function_cache), 2)
seven = defined(b=a, a=b)
self.assertEqual(len(defined._function_cache), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testGradientWithKeywordArguments(self):
matmul = function.defun(math_ops.matmul)
def sq(x):
return matmul(a=x, b=x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(t)
one = matmul(t, b=t, transpose_a=True)
two = matmul(b=t, a=t, transpose_a=True)
three = matmul(a=t, b=t, transpose_a=True)
for output in [one, two, three]:
self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]])
def testGradientInFunctionWithKeywordArguments(self):
@function.defun
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0)
def testDefuningInstanceMethod(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@function.defun
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@function.defun
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
def testPythonCallWithSideEffects(self):
state = []
@function.defun
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
self.assertRegexpMatches(
functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegexpMatches(
functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'attr1': 'value1'})
def matmul(x, y):
return math_ops.matmul(x, y)
with self.assertRaisesRegexp(ValueError,
'.*Attribute name is not whitelisted.*'):
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
matmul(t, t)
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegexp(ValueError,
'.*Unsupported attribute type.*'):
with context.graph_mode(), self.test_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
pre_register_matmul_func_name = functions[0].definition.signature.name
self.assertRegexpMatches(pre_register_matmul_func_name, '.*matmul.*')
pre_register_add_func_name = functions[1].definition.signature.name
self.assertRegexpMatches(pre_register_add_func_name, '.*add.*')
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
# Make sure the pre registered function is used, and no other function
# is added.
self.assertEqual(len(graph._functions), 2)
functions = list(graph._functions.values())
called_func_name = functions[0].definition.signature.name
self.assertEqual(pre_register_matmul_func_name, called_func_name)
called_func_name = functions[1].definition.signature.name
self.assertEqual(pre_register_add_func_name, called_func_name)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 1)
# Test input param shape mismatch
t2 = constant_op.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
with self.assertRaisesRegexp(
ValueError, 'Python inputs incompatible with input_signature'):
function.register(defun_matmul, t2, t2)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
# Only one function is registered since the input params are of the same type
# pylint: disable=protected-access
self.assertEqual(len(graph._functions), 1)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertEqual(len(graph_function.inputs), 1)
self.assertEqual(len(graph_function.captured_inputs), 0)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaisesRegexp(ValueError, 'All inputs to `Function`s must '
'be Tensors;.*'):
graph_function('Not a Tensor.')
@test_util.with_c_shapes
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
with function.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value().eval(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name='true')
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name='false_true')
return 2.0
def inner_false_fn():
v.assign(v * 3, name='false_false')
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope('final'):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
p = array_ops.placeholder(dtype=dtypes.bool)
with function.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
variables.global_variables_initializer().run()
@function.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f().eval(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegexp(TypeError,
'.*must return zero or more Tensors.*'):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
def testFunctionModifiesInputList(self):
# Tests on `list` methods that do in place modification, except `list.sort`
# since it cannot even be "defunned" in the first place
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that is in Py3 but not Py2
if sys.version.startswith('3'):
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
# Test on functions that modify structure of nested input arguments
expected_msg = (
'Function to be traced should not modify structure of input '
'arguments. Check if your function has list and dictionary '
'operations that alter input arguments, '
'such as `list.pop`, `list.append`')
with self.assertRaisesRegexp(ValueError, expected_msg):
@function.defun
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegexp(ValueError, expected_msg):
# The flat list doesn't change whereas the true structure changes
@function.defun
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
def testDecoratedMethodVariableCleanup(self):
m = DefunnedMiniModel()
m(array_ops.ones([1, 2]))
weak_variables = weakref.WeakSet(m.variables)
self.assertEqual(2, len(weak_variables))
del m
self.assertEqual([], list(weak_variables))
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
| 31.087703
| 80
| 0.65876
|
bbac000a8c624bef379e5a6e38da02f5d1ac4f68
| 4,033
|
py
|
Python
|
src/awkward/_v2/behaviors/mixins.py
|
scikit-hep/awkward-1.0
|
a440328f8097d22c2ba053fd117fed543829afc0
|
[
"BSD-3-Clause"
] | 519
|
2019-10-17T12:36:22.000Z
|
2022-03-26T23:28:19.000Z
|
src/awkward/_v2/behaviors/mixins.py
|
scikit-hep/awkward-1.0
|
a440328f8097d22c2ba053fd117fed543829afc0
|
[
"BSD-3-Clause"
] | 924
|
2019-11-03T21:05:01.000Z
|
2022-03-31T22:44:30.000Z
|
src/awkward/_v2/behaviors/mixins.py
|
scikit-hep/awkward-1.0
|
a440328f8097d22c2ba053fd117fed543829afc0
|
[
"BSD-3-Clause"
] | 56
|
2019-12-17T15:49:22.000Z
|
2022-03-09T20:34:06.000Z
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
# from __future__ import absolute_import
# import sys
# import awkward as ak
# def mixin_class(registry, name=None):
# """
# Args:
# registry (dict): The destination behavior mapping registry. Typically,
# this would be the global registry #ak.behavior, but one may wish
# to register methods in an alternative way.
# name (str): The name to assign to the behavior class.
# This decorator can be used to register a behavior mixin class.
# Any inherited behaviors will automatically be made available to the decorated
# class.
# See the "Mixin decorators" section of #ak.behavior for further details.
# """
# def register(cls):
# cls_name = cls.__name__
# if name is None:
# behavior_name = cls_name
# else:
# behavior_name = name
# record = type(
# cls_name + "Record",
# (cls, ak.highlevel.Record),
# {"__module__": cls.__module__},
# )
# setattr(sys.modules[cls.__module__], cls_name + "Record", record)
# registry[behavior_name] = record
# array = type(
# cls_name + "Array",
# (cls, ak.highlevel.Array),
# {"__module__": cls.__module__},
# )
# setattr(sys.modules[cls.__module__], cls_name + "Array", array)
# registry["*", behavior_name] = array
# for basecls in cls.mro():
# for method in basecls.__dict__.values():
# if hasattr(method, "_awkward_mixin"):
# ufunc, rhs, transpose = method._awkward_mixin
# if rhs is None:
# registry.setdefault((ufunc, behavior_name), method)
# continue
# for rhs_name in list(rhs) + [behavior_name]:
# registry.setdefault((ufunc, behavior_name, rhs_name), method)
# if transpose is not None and rhs_name != behavior_name:
# registry.setdefault(
# (ufunc, rhs_name, behavior_name), transpose
# )
# if basecls.__name__ in rhs:
# rhs.add(behavior_name)
# return cls
# return register
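# A minimal usage sketch for `mixin_class` (hypothetical class name; assumes the
# decorator above is uncommented and a behavior registry such as `ak.behavior`
# is available):
#
#     @mixin_class(ak.behavior)
#     class Point:
#         @property
#         def magnitude(self):
#             return (self.x**2 + self.y**2) ** 0.5
#
# Records and arrays created with the record name "Point" would then expose the
# `magnitude` property through the registered `PointRecord`/`PointArray` classes.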
# def mixin_class_method(ufunc, rhs=None, transpose=True):
# """
# Args:
# ufunc (numpy.ufunc): A universal function (or NEP18 callable) that is
# hooked in Awkward Array, i.e. it can be the first argument of a behavior.
# rhs (Set[type] or None): Set of right-hand side argument types, optional
# if wrapping a unary function. The left-hand side is expected to
# always be `self` of the parent class.
# transpose (bool): If true, automatically create a transpose signature
# (only makes sense for binary ufuncs).
# This decorator can be used to register a mixin class method.
# Using this decorator ensures that derived classes that are declared with the
# #ak.mixin_class decorator will also have the behaviors that this class has.
# """
# def register(method):
# if not isinstance(rhs, (set, type(None))):
# raise ValueError(
# "expected a set of right-hand-side argument types"
# + ak._util.exception_suffix(__file__)
# )
# if transpose and rhs is not None:
# def transposed(left, right):
# return method(right, left)
# # make a copy of rhs, we will edit it later
# method._awkward_mixin = (ufunc, set(rhs), transposed)
# else:
# method._awkward_mixin = (ufunc, rhs, None)
# return method
# return register
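# A sketch of the binary-ufunc case for `mixin_class_method` (hypothetical names;
# `np` is numpy and `ak` is awkward, assuming the decorators above are active):
#
#     @mixin_class(ak.behavior)
#     class Point:
#         @mixin_class_method(np.add, {"Point"})
#         def point_add(self, other):
#             return ak.zip(
#                 {"x": self.x + other.x, "y": self.y + other.y},
#                 with_name="Point",
#             )
#
# Adding two "Point" arrays would then dispatch `np.add` to `point_add`; with the
# default `transpose=True`, a differing right-hand-side type would also be matched
# in the swapped argument order.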
# __all__ = [
# x for x in list(globals()) if not x.startswith("_") and x not in ("sys", "ak")
# ]
# def __dir__():
# return __all__
| 36.333333
| 87
| 0.567072
|
cdefdbe0d3cc9323336cdfd5f16c74444b5ecd5c
| 657
|
py
|
Python
|
apps/base/migrations/0007_auto_20180525_0906.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/base/migrations/0007_auto_20180525_0906.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/base/migrations/0007_auto_20180525_0906.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-05-25 09:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0006_auto_20180521_0534'),
]
operations = [
migrations.AddField(
model_name='projecttodolist',
name='archived',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='projectpage',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages', to='base.Project'),
),
]
| 26.28
| 122
| 0.621005
|
9c0ebfc4be47cf915649db649edf264bfa5105d1
| 2,342
|
py
|
Python
|
trinity/sync/beam/constants.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
trinity/sync/beam/constants.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | 2
|
2019-04-30T06:22:12.000Z
|
2019-06-14T04:27:18.000Z
|
trinity/sync/beam/constants.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
from eth.constants import MAX_UNCLE_DEPTH
# Peers are typically expected to have predicted nodes available,
# so it's reasonable to ask for all-predictive nodes from a peer.
# Urgent node requests usually come in pretty fast, so
# even at a small value (like 1ms), this timeout is rarely triggered.
DELAY_BEFORE_NON_URGENT_REQUEST = 0.05
# How large should our buffer be? This is a multiplier on how many
# nodes we can request at once from a single peer.
REQUEST_BUFFER_MULTIPLIER = 16
# How many different processes are running previews? They will split the
# block imports equally. A higher number means a slower startup, but more
# previews are possible at a time (given that you have enough CPU cores).
# The sensitivity of this number is relatively unexplored.
NUM_PREVIEW_SHARDS = 4
# How many speculative executions should we run concurrently? This is
# a global number, not per process or thread. It is necessary to
# constrain the I/O, which can become the global bottleneck.
MAX_CONCURRENT_SPECULATIVE_EXECUTIONS = 40
MAX_SPECULATIVE_EXECUTIONS_PER_PROCESS = MAX_CONCURRENT_SPECULATIVE_EXECUTIONS // NUM_PREVIEW_SHARDS
# If a peer does something not ideal, give it a little time to breathe,
# and maybe try out another peer. Then reinsert it relatively soon.
# Measured in seconds.
NON_IDEAL_RESPONSE_PENALTY = 0.5
# How many seconds should we leave the backfill peer idle, in between
# backfill requests? This is called "tests" because, importantly, we are
# checking how fast a peer is.
GAP_BETWEEN_TESTS = 0.25
# One reason to leave this as non-zero is: if we are regularly switching
# the "queen peer" then we want to improve the chances that the new queen
# (formerly backfill) is idle and ready to serve urgent nodes.
# Another reason to leave this as non-zero: we don't want to overload the
# database with reads/writes, but there are probably better ways to achieve
# that goal.
# One reason to make it relatively short is that we want to find out quickly
# when a new peer has excellent service stats. It might take several requests
# to establish it (partially because we measure using an exponential average).
# We need MAX_UNCLE_DEPTH + 1 headers to check during uncle validation
# We need to request one more header, to set the starting tip
FULL_BLOCKS_NEEDED_TO_START_BEAM = MAX_UNCLE_DEPTH + 2
| 49.829787
| 100
| 0.787361
|
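The constants file above splits a global speculative-execution budget evenly across preview shards; a minimal sketch of that arithmetic (values copied from the record, the name per_process_limit is only illustrative):

# Each preview shard receives an equal share of the global concurrency cap.
MAX_CONCURRENT_SPECULATIVE_EXECUTIONS = 40
NUM_PREVIEW_SHARDS = 4
per_process_limit = MAX_CONCURRENT_SPECULATIVE_EXECUTIONS // NUM_PREVIEW_SHARDS
assert per_process_limit == 10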
633427e3c348e2ff6317b93bbfadfb3e3610dcfb
| 1,982
|
py
|
Python
|
thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 50
|
2015-04-21T14:12:01.000Z
|
2020-06-01T06:23:13.000Z
|
thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t004lexer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | 25
|
2015-05-22T04:02:33.000Z
|
2020-01-14T12:15:12.000Z
|
import antlr3
import testbase
import unittest
class t004lexer(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def emitErrorMessage(self, msg):
# report errors to /dev/null
pass
def reportError(self, re):
# no error recovery yet, just crash!
raise re
return TLexer
def testValid(self):
stream = antlr3.StringStream('ffofoofooo')
lexer = self.getLexer(stream)
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 0)
self.assertEqual(token.stop, 0)
self.assertEqual(token.text, 'f')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 1)
self.assertEqual(token.stop, 2)
self.assertEqual(token.text, 'fo')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 3)
self.assertEqual(token.stop, 5)
self.assertEqual(token.text, 'foo')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.FOO)
self.assertEqual(token.start, 6)
self.assertEqual(token.stop, 9)
self.assertEqual(token.text, 'fooo')
token = lexer.nextToken()
self.assertEqual(token.type, self.lexerModule.EOF)
def testMalformedInput(self):
stream = antlr3.StringStream('2')
lexer = self.getLexer(stream)
try:
token = lexer.nextToken()
self.fail()
except antlr3.MismatchedTokenException as exc:
self.assertEqual(exc.expecting, 'f')
self.assertEqual(exc.unexpectedType, '2')
if __name__ == '__main__':
unittest.main()
| 27.915493
| 58
| 0.593845
|
37777aa1193466023cc5720027a0bbfbc4b83d56
| 1,699
|
py
|
Python
|
torch_geometric/utils/sbm.py
|
Mirwaisse/pytorch_geometric
|
0b0022e7c1dce9864184d2d666984cd35dff47e0
|
[
"MIT"
] | 1
|
2019-10-16T09:14:19.000Z
|
2019-10-16T09:14:19.000Z
|
torch_geometric/utils/sbm.py
|
cuiqiang1990/pytorch_geometric
|
a7143b8d9ace60cf2ec1bd14ecc20ff7c3141151
|
[
"MIT"
] | null | null | null |
torch_geometric/utils/sbm.py
|
cuiqiang1990/pytorch_geometric
|
a7143b8d9ace60cf2ec1bd14ecc20ff7c3141151
|
[
"MIT"
] | 1
|
2020-09-21T22:01:18.000Z
|
2020-09-21T22:01:18.000Z
|
import torch
from torch_geometric.utils import to_undirected
def stochastic_blockmodel_graph(block_sizes, edge_probs, directed=False):
r"""Returns the :obj:`edge_index` of a stochastic blockmodel graph.
Args:
block_sizes ([int] or LongTensor): The sizes of blocks.
edge_probs ([[float]] or FloatTensor): The density of edges going
from each block to each other block. Must be symmetric if the graph is
undirected.
directed (bool, optional): If set to :obj:`True`, will return a
directed graph. (default: :obj:`False`)
"""
size, prob = block_sizes, edge_probs
if not torch.is_tensor(size):
size = torch.tensor(size, dtype=torch.long)
if not torch.is_tensor(prob):
prob = torch.tensor(prob, dtype=torch.float)
assert size.dim() == 1
assert prob.dim() == 2 and prob.size(0) == prob.size(1)
assert size.size(0) == prob.size(0)
if not directed:
assert torch.allclose(prob, prob.t())
node_idx = torch.cat([size.new_full((b, ), i) for i, b in enumerate(size)])
num_nodes = node_idx.size(0)
if directed:
idx = torch.arange((num_nodes - 1) * num_nodes)
idx = idx.view(num_nodes - 1, num_nodes)
idx = idx + torch.arange(1, num_nodes).view(-1, 1)
idx = idx.view(-1)
        row, col = idx // num_nodes, idx % num_nodes  # floor division keeps the indices integral
else:
row, col = torch.combinations(torch.arange(num_nodes)).t()
mask = torch.bernoulli(prob[node_idx[row], node_idx[col]]).to(torch.uint8)
edge_index = torch.stack([row[mask], col[mask]], dim=0)
if not directed:
edge_index = to_undirected(edge_index, num_nodes)
return edge_index
| 34.673469
| 79
| 0.642731
|
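The sbm.py record above defines stochastic_blockmodel_graph; a minimal usage sketch, assuming the function is re-exported from torch_geometric.utils as in the upstream package (block sizes and probabilities are illustrative):

from torch_geometric.utils import stochastic_blockmodel_graph  # export path assumed

block_sizes = [50, 50]                       # two blocks of 50 nodes each
edge_probs = [[0.25, 0.02],
              [0.02, 0.25]]                  # dense within blocks, sparse across; symmetric for the undirected case
edge_index = stochastic_blockmodel_graph(block_sizes, edge_probs)
print(edge_index.size(0))                    # 2: a row-index tensor and a column-index tensor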
b4198e9291b70f208145e6ab4ba2e5c8a0e06a3c
| 10,990
|
py
|
Python
|
tensorpack_medical/models/conv3d.py
|
Jdorri/rl-medical
|
750b1f10413daa9cd8e346332bd844212e76ddd8
|
[
"Apache-2.0"
] | 96
|
2019-10-16T19:20:21.000Z
|
2022-03-22T02:35:01.000Z
|
tensorpack_medical/models/conv3d.py
|
Helogn/rl-medical
|
c9d4006262ab16dc04980215778a740343ea5dcd
|
[
"Apache-2.0"
] | 78
|
2020-01-19T10:47:31.000Z
|
2020-05-13T11:13:05.000Z
|
tensorpack_medical/models/conv3d.py
|
Helogn/rl-medical
|
c9d4006262ab16dc04980215778a740343ea5dcd
|
[
"Apache-2.0"
] | 42
|
2019-10-12T07:48:23.000Z
|
2021-12-01T22:37:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: conv3d.py
# Author: Amir Alansary <amiralansary@gmail.com>
import tensorflow as tf
from tensorpack import layer_register, VariableHolder
from tensorpack.tfutils.common import get_tf_version_tuple
from .tflayer import rename_get_variable, convert_to_tflayer_args
from tensorpack_medical.utils.argtools import shape3d, shape5d, get_data_format3d
from tensorpack.models.shape_utils import StaticDynamicAxis  # used by Deconv3D below; module path assumed
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv3D(
inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding='same',
data_format='channels_last',
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
split=1):
"""
A wrapper around `tf.layers.Conv3D`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
3. Support 'split' argument to do group conv.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if split == 1:
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv3D(
filters,
kernel_size,
strides=strides,
padding=padding,
                data_format=data_format,  # honour the caller-supplied layout
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# group conv implementation
data_format = get_data_format3d(data_format, tfmode=False)
in_shape = inputs.get_shape().as_list()
channel_axis = 4 if data_format == 'NDHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv3D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Not supported by group conv now!"
out_channel = filters
assert out_channel % split == 0
assert dilation_rate == (1, 1, 1) or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for group dilated conv'
kernel_shape = shape3d(kernel_size)
        filter_shape = kernel_shape + [in_channel // split, out_channel]  # channels per group must be an integer
stride = shape5d(strides, data_format=data_format)
kwargs = dict(data_format=data_format)
if get_tf_version_tuple() >= (1, 5):
            kwargs['dilations'] = shape5d(dilation_rate, data_format=data_format)  # tf.nn.conv3d expects a length-5 dilation spec
W = tf.get_variable(
'W', filter_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=bias_initializer)
inputs = tf.split(inputs, split, channel_axis)
# tf.split(value,num_or_size_splits,axis=0, num=None,name='split')
kernels = tf.split(W, split, 4)
outputs = [tf.nn.conv3d(i, k, stride, padding.upper(), **kwargs)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
if activation is None:
activation = tf.identity
ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
# @layer_register(log_shape=True)
# def Conv3D(x, out_channel, kernel_shape,
# padding='SAME', stride=1,
# W_init=None, b_init=None,
# nl=tf.identity, split=1, use_bias=True,
# data_format='NDHWC'):
# """
# 3D convolution on 5D inputs.
# Args:
# x (tf.Tensor): a 5D tensor.
# Must have known number of channels, but can have other unknown dimensions.
# out_channel (int): number of output channel.
# kernel_shape: (d, h, w) tuple or a int.
# stride: (d, h, w) tuple or a int.
# padding (str): 'valid' or 'same'. Case insensitive.
# split (int): Split channels as used in Alexnet. Defaults to 1 (no split).
# W_init: initializer for W. Defaults to `variance_scaling_initializer`.
# b_init: initializer for b. Defaults to zero.
# nl: a nonlinearity function.
# use_bias (bool): whether to use bias.
# data_format: An optional string from: "NDHWC", "NCDHW".
# Defaults to "NDHWC". The data format of the input and output data.
# With the default format "NDHWC", the data is stored in the order
# of: [batch, in_depth, in_height, in_width, in_channels].
# Alternatively, the format could be "NCDHW", the data storage order
# is: [batch, in_channels, in_depth, in_height, in_width].
# Returns:
# tf.Tensor named ``output`` with attribute `variables`.
# Variable Names:
# * ``W``: weights
# * ``b``: bias
# """
# in_shape = x.get_shape().as_list()
# channel_axis = 4 if data_format == 'NDHWC' else 1
# in_channel = in_shape[channel_axis]
# assert in_channel is not None, "[Conv3D] Input cannot have unknown channel!"
# assert in_channel % split == 0
# assert out_channel % split == 0
# kernel_shape = shape3d(kernel_shape)
# padding = padding.upper()
# filter_shape = kernel_shape + [in_channel / split, out_channel]
# stride = shape5d(stride, data_format=data_format)
# if W_init is None:
# W_init = tf.contrib.layers.variance_scaling_initializer()
# if b_init is None:
# b_init = tf.constant_initializer()
# W = tf.get_variable('W', filter_shape, initializer=W_init)
# if use_bias:
# b = tf.get_variable('b', [out_channel], initializer=b_init)
# if split == 1:
# conv = tf.nn.conv3d(x, W, stride, padding, data_format=data_format)
# else:
# inputs = tf.split(x, split, channel_axis)
# kernels = tf.split(W, split, 3) # todo: this should be 3 or 4?
# outputs = [tf.nn.conv3d(i, k, stride, padding, data_format=data_format)
# for i, k in zip(inputs, kernels)]
# conv = tf.concat(outputs, channel_axis)
# # todo: check data format in bias_add
# ret = nl(tf.nn.bias_add(conv, b, data_format='NHWC') if use_bias else conv, name='output')
# ret.variables = VariableHolder(W=W)
# if use_bias:
# ret.variables.b = b
# return ret
@layer_register(log_shape=True)
def Deconv3D(x, out_shape, kernel_shape,
stride, padding='SAME',
W_init=None, b_init=None,
nl=tf.identity, use_bias=True,
data_format='NDHWC'):
"""
3D deconvolution on 5D inputs.
Args:
x (tf.Tensor): a tensor of shape NDHWC.
Must have known number of channels, but can have other unknown dimensions.
out_shape: (d, h, w, channel) tuple, or just a integer channel,
then (d, h, w) will be calculated by input_shape * stride
kernel_shape: (d, h, w) tuple or a int.
stride: (h, w) tuple or a int.
padding (str): 'valid' or 'same'. Case insensitive.
W_init: initializer for W. Defaults to `variance_scaling_initializer`.
b_init: initializer for b. Defaults to zero.
nl: a nonlinearity function.
use_bias (bool): whether to use bias.
Returns:
tf.Tensor: a NDHWC tensor named ``output`` with attribute `variables`.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
in_shape = x.get_shape().as_list()
channel_axis = 4 if data_format == 'NDHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Deconv3D] Input cannot have unknown channel!"
kernel_shape = shape3d(kernel_shape)
stride3d = shape3d(stride)
stride5d = shape5d(stride, data_format=data_format)
padding = padding.upper()
in_shape_dyn = tf.shape(x)
if isinstance(out_shape, int):
out_channel = out_shape
if data_format == 'NDHWC':
shp3_0 = StaticDynamicAxis(in_shape[1], in_shape_dyn[1]).apply(lambda x: stride3d[0] * x)
shp3_1 = StaticDynamicAxis(in_shape[2], in_shape_dyn[2]).apply(lambda x: stride3d[1] * x)
shp3_2 = StaticDynamicAxis(in_shape[3], in_shape_dyn[3]).apply(lambda x: stride3d[2] * x)
shp3_dyn = [shp3_0.dynamic, shp3_1.dynamic, shp3_2.dynamic, out_channel]
shp3_static = [shp3_0.static, shp3_1.static, shp3_2.static, out_channel]
else:
shp3_0 = StaticDynamicAxis(in_shape[2], in_shape_dyn[2]).apply(lambda x: stride3d[0] * x)
shp3_1 = StaticDynamicAxis(in_shape[3], in_shape_dyn[3]).apply(lambda x: stride3d[1] * x)
shp3_2 = StaticDynamicAxis(in_shape[4], in_shape_dyn[4]).apply(lambda x: stride3d[2] * x)
shp3_dyn = [out_channel, shp3_0.dynamic, shp3_1.dynamic, shp3_2.dynamic]
shp3_static = [out_channel, shp3_0.static, shp3_1.static, shp3_2.static]
else:
for k in out_shape:
if not isinstance(k, int):
raise ValueError("[Deconv3D] out_shape {} is invalid!".format(k))
out_channel = out_shape[channel_axis - 1] # out_shape doesn't have batch
shp3_static = shp3_dyn = out_shape
filter_shape = kernel_shape + [out_channel, in_channel]
if W_init is None:
W_init = tf.contrib.layers.variance_scaling_initializer() # xavier_initializer_conv2d()
if b_init is None:
b_init = tf.constant_initializer()
W = tf.get_variable('W', filter_shape, initializer=W_init)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
out_shape_dyn = tf.stack([tf.shape(x)[0]] + shp3_dyn)
conv = tf.nn.conv3d_transpose(
x, W, out_shape_dyn, stride5d, padding=padding, data_format=data_format)
conv.set_shape(tf.TensorShape([None] + shp3_static))
ret = nl(tf.nn.bias_add(conv, b, data_format='NDHWC') if use_bias else conv, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
| 40.109489
| 120
| 0.631483
|
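The conv3d.py record above registers Conv3D as a tensorpack layer wrapping tf.layers.Conv3D; a minimal usage sketch under TensorFlow 1.x, assuming tensorpack's usual name-first calling convention for registered layers and the import path implied by the repository layout:

import tensorflow as tf
from tensorpack_medical.models.conv3d import Conv3D  # import path assumed

# NDHWC input: a batch of two 32^3 single-channel volumes
volumes = tf.placeholder(tf.float32, [2, 32, 32, 32, 1])
features = Conv3D('conv1', volumes, filters=16, kernel_size=3, strides=(2, 2, 2))
# 'same' padding with stride 2 halves each spatial dimension: (2, 16, 16, 16, 16)
print(features.shape)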
8757ecd60cee87ee7869a3cd07a99cd0b2d9bca6
| 272
|
py
|
Python
|
src/homework/homework10/score_entry.py
|
acc-cosc-1336/cosc-1336-spring-2018-EricScotty
|
80c0249a583dc178cfc7bb95b851d7f3240dc3e9
|
[
"MIT"
] | null | null | null |
src/homework/homework10/score_entry.py
|
acc-cosc-1336/cosc-1336-spring-2018-EricScotty
|
80c0249a583dc178cfc7bb95b851d7f3240dc3e9
|
[
"MIT"
] | null | null | null |
src/homework/homework10/score_entry.py
|
acc-cosc-1336/cosc-1336-spring-2018-EricScotty
|
80c0249a583dc178cfc7bb95b851d7f3240dc3e9
|
[
"MIT"
] | null | null | null |
class ScoreEntry:
def __init__(self, score_entry_id, die1_value, die2_value):
#create public class attributes for each parameter
self.score_entry_id = score_entry_id
self.die1_value = die1_value
self.die2_value = die2_value
| 27.2
| 64
| 0.6875
|
bbc9459f8ee072c794d5910c8b84ab0858127f37
| 3,395
|
py
|
Python
|
lib/services/clouddb/ncloud_clouddb/model/common_code.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 12
|
2018-11-20T04:30:49.000Z
|
2021-11-09T12:34:26.000Z
|
lib/services/clouddb/ncloud_clouddb/model/common_code.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 1
|
2019-01-24T15:56:15.000Z
|
2019-05-31T07:56:55.000Z
|
lib/services/clouddb/ncloud_clouddb/model/common_code.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 6
|
2018-06-29T03:45:50.000Z
|
2022-03-18T01:51:45.000Z
|
# coding: utf-8
"""
clouddb
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CommonCode(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'str',
'code_name': 'str'
}
attribute_map = {
'code': 'code',
'code_name': 'codeName'
}
def __init__(self, code=None, code_name=None): # noqa: E501
"""CommonCode - a model defined in Swagger""" # noqa: E501
self._code = None
self._code_name = None
self.discriminator = None
if code is not None:
self.code = code
if code_name is not None:
self.code_name = code_name
@property
def code(self):
"""Gets the code of this CommonCode. # noqa: E501
:return: The code of this CommonCode. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this CommonCode.
:param code: The code of this CommonCode. # noqa: E501
:type: str
"""
self._code = code
@property
def code_name(self):
"""Gets the code_name of this CommonCode. # noqa: E501
:return: The code_name of this CommonCode. # noqa: E501
:rtype: str
"""
return self._code_name
@code_name.setter
def code_name(self, code_name):
"""Sets the code_name of this CommonCode.
:param code_name: The code_name of this CommonCode. # noqa: E501
:type: str
"""
self._code_name = code_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CommonCode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.963235
| 80
| 0.541973
|
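The common_code.py record above is a swagger-generated model with plain getter/setter properties; a minimal usage sketch, with the import path assumed from the repository layout and the code values purely illustrative:

from ncloud_clouddb.model.common_code import CommonCode  # import path assumed

entry = CommonCode(code='UBNTU', code_name='Ubuntu')          # illustrative values
print(entry.to_dict())                                        # {'code': 'UBNTU', 'code_name': 'Ubuntu'}
print(entry == CommonCode(code='UBNTU', code_name='Ubuntu'))  # True: __eq__ compares __dict__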